BrainRoster committed
Commit 90855fe
Parent: 4f33e04

Upload PPO LunarLander-v2 trained agent

README.md CHANGED
@@ -16,7 +16,7 @@ model-index:
  type: LunarLander-v2
  metrics:
  - type: mean_reward
- value: -291.61 +/- 200.18
+ value: -513.91 +/- 72.78
  name: mean_reward
  verified: false
  ---
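The metric drop from -291.61 to -513.91 comes straight from the re-evaluation recorded in results.json at the bottom of this commit. To inspect the checkpoint locally, a minimal loading sketch follows; the repo id is an assumption (substitute the actual `<user>/<repo>`), and note that although the commit message and file names say PPO, the `policy_class` serialized in config.json below is `stable_baselines3.dqn.policies.DQNPolicy`, so the file loads with `DQN.load`:

```python
# Minimal sketch: fetch the uploaded zip from the Hub and load it with SB3.
# Assumption: the repo id below is a placeholder for this model's actual repo.
from huggingface_sb3 import load_from_hub
from stable_baselines3 import DQN  # serialized policy_class is DQNPolicy, despite the "ppo-" naming

checkpoint = load_from_hub(
    repo_id="BrainRoster/ppo-LunarLander-v2",  # assumed repo id
    filename="ppo-LunarLander-v2.zip",
)
model = DQN.load(checkpoint)
```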
config.json CHANGED
@@ -1 +1 @@
- {"policy_class": {":type:": "<class 'abc.ABCMeta'>", ":serialized:": "gAWVMAAAAAAAAACMHnN0YWJsZV9iYXNlbGluZXMzLmRxbi5wb2xpY2llc5SMCURRTlBvbGljeZSTlC4=", "__module__": "stable_baselines3.dqn.policies", "__annotations__": "{'q_net': <class 'stable_baselines3.dqn.policies.QNetwork'>, 'q_net_target': <class 'stable_baselines3.dqn.policies.QNetwork'>}", "__doc__": "\n Policy class with Q-Value Net and target net for DQN\n\n :param observation_space: Observation space\n :param action_space: Action space\n :param lr_schedule: Learning rate schedule (could be constant)\n :param net_arch: The specification of the policy and value networks.\n :param activation_fn: Activation function\n :param features_extractor_class: Features extractor to use.\n :param features_extractor_kwargs: Keyword arguments\n to pass to the features extractor.\n :param normalize_images: Whether to normalize images or not,\n dividing by 255.0 (True by default)\n :param optimizer_class: The optimizer to use,\n ``th.optim.Adam`` by default\n :param optimizer_kwargs: Additional keyword arguments,\n excluding the learning rate, to pass to the optimizer\n ", "__init__": "<function DQNPolicy.__init__ at 0x000001C9A1381760>", "_build": "<function DQNPolicy._build at 0x000001C9A1381800>", "make_q_net": "<function DQNPolicy.make_q_net at 0x000001C9A13818A0>", "forward": "<function DQNPolicy.forward at 0x000001C9A1381940>", "_predict": "<function DQNPolicy._predict at 0x000001C9A13819E0>", "_get_constructor_parameters": "<function DQNPolicy._get_constructor_parameters at 0x000001C9A1381A80>", "set_training_mode": "<function DQNPolicy.set_training_mode at 0x000001C9A1381B20>", "__abstractmethods__": "frozenset()", "_abc_impl": "<_abc._abc_data object at 0x000001C9A137B600>"}, "verbose": 1, "policy_kwargs": {"net_arch": [256, 256]}, "num_timesteps": 12, "_total_timesteps": 10, "_num_timesteps_at_start": 0, "seed": null, "action_noise": null, "start_time": 1685348680573278900, "learning_rate": 0.005, "tensorboard_log": null, "_last_obs": {":type:": "<class 'numpy.ndarray'>", ":serialized:": "gAWVlQAAAAAAAACMEm51bXB5LmNvcmUubnVtZXJpY5SMC19mcm9tYnVmZmVylJOUKJYgAAAAAAAAAM0QbLxBBbs/9VYNvrdRTz37JQk92oLNPQAAAAAAAAAAlIwFbnVtcHmUjAVkdHlwZZSTlIwCZjSUiYiHlFKUKEsDjAE8lE5OTkr/////Sv////9LAHSUYksBSwiGlIwBQ5R0lFKULg=="}, "_last_episode_starts": {":type:": "<class 'numpy.ndarray'>", ":serialized:": "gAWVdAAAAAAAAACMEm51bXB5LmNvcmUubnVtZXJpY5SMC19mcm9tYnVmZmVylJOUKJYBAAAAAAAAAAGUjAVudW1weZSMBWR0eXBllJOUjAJiMZSJiIeUUpQoSwOMAXyUTk5OSv////9K/////0sAdJRiSwGFlIwBQ5R0lFKULg=="}, "_last_original_obs": {":type:": "<class 'numpy.ndarray'>", ":serialized:": "gAWVlQAAAAAAAACMEm51bXB5LmNvcmUubnVtZXJpY5SMC19mcm9tYnVmZmVylJOUKJYgAAAAAAAAAABIVrzc37o/iy8DvtupnT28Muk8Lal4PQAAAAAAAAAAlIwFbnVtcHmUjAVkdHlwZZSTlIwCZjSUiYiHlFKUKEsDjAE8lE5OTkr/////Sv////9LAHSUYksBSwiGlIwBQ5R0lFKULg=="}, "_episode_num": 0, "use_sde": false, "sde_sample_freq": -1, "_current_progress_remaining": -0.19999999999999996, "_stats_window_size": 100, "ep_info_buffer": {":type:": "<class 'collections.deque'>", ":serialized:": "gAWVIAAAAAAAAACMC2NvbGxlY3Rpb25zlIwFZGVxdWWUk5QpS2SGlFKULg=="}, "ep_success_buffer": {":type:": "<class 'collections.deque'>", ":serialized:": "gAWVIAAAAAAAAACMC2NvbGxlY3Rpb25zlIwFZGVxdWWUk5QpS2SGlFKULg=="}, "_n_updates": 0, "buffer_size": 1000000, "batch_size": 256, "learning_starts": 50000, "tau": 1.0, "gamma": 0.99, "gradient_steps": 1, "optimize_memory_usage": false, "replay_buffer_class": {":type:": "<class 'abc.ABCMeta'>", ":serialized:": 
"gAWVNQAAAAAAAACMIHN0YWJsZV9iYXNlbGluZXMzLmNvbW1vbi5idWZmZXJzlIwMUmVwbGF5QnVmZmVylJOULg==", "__module__": "stable_baselines3.common.buffers", "__doc__": "\n Replay buffer used in off-policy algorithms like SAC/TD3.\n\n :param buffer_size: Max number of element in the buffer\n :param observation_space: Observation space\n :param action_space: Action space\n :param device: PyTorch device\n :param n_envs: Number of parallel environments\n :param optimize_memory_usage: Enable a memory efficient variant\n of the replay buffer which reduces by almost a factor two the memory used,\n at a cost of more complexity.\n See https://github.com/DLR-RM/stable-baselines3/issues/37#issuecomment-637501195\n and https://github.com/DLR-RM/stable-baselines3/pull/28#issuecomment-637559274\n Cannot be used in combination with handle_timeout_termination.\n :param handle_timeout_termination: Handle timeout termination (due to timelimit)\n separately and treat the task as infinite horizon task.\n https://github.com/DLR-RM/stable-baselines3/issues/284\n ", "__init__": "<function ReplayBuffer.__init__ at 0x000001C9A13593A0>", "add": "<function ReplayBuffer.add at 0x000001C9A13594E0>", "sample": "<function ReplayBuffer.sample at 0x000001C9A1359580>", "_get_samples": "<function ReplayBuffer._get_samples at 0x000001C9A1359620>", "__abstractmethods__": "frozenset()", "_abc_impl": "<_abc._abc_data object at 0x000001C9A135D080>"}, "replay_buffer_kwargs": {}, "train_freq": {":type:": "<class 'stable_baselines3.common.type_aliases.TrainFreq'>", ":serialized:": "gAWVeAAAAAAAAACMJXN0YWJsZV9iYXNlbGluZXMzLmNvbW1vbi50eXBlX2FsaWFzZXOUjAlUcmFpbkZyZXGUk5RLBIwIYnVpbHRpbnOUjAdnZXRhdHRylJOUaACMElRyYWluRnJlcXVlbmN5VW5pdJSTlIwEU1RFUJSGlFKUhpSBlC4="}, "use_sde_at_warmup": false, "exploration_initial_eps": 1.0, "exploration_final_eps": 0.1, "exploration_fraction": 0.2, "target_update_interval": 10000, "_n_calls": 12, "max_grad_norm": 10, "exploration_rate": 0.1, "observation_space": {":type:": "<class 'gymnasium.spaces.box.Box'>", ":serialized:": "gAWVdgIAAAAAAACMFGd5bW5hc2l1bS5zcGFjZXMuYm94lIwDQm94lJOUKYGUfZQojAVkdHlwZZSMBW51bXB5lIwFZHR5cGWUk5SMAmY0lImIh5RSlChLA4wBPJROTk5K/////0r/////SwB0lGKMDWJvdW5kZWRfYmVsb3eUjBJudW1weS5jb3JlLm51bWVyaWOUjAtfZnJvbWJ1ZmZlcpSTlCiWCAAAAAAAAAABAQEBAQEBAZRoCIwCYjGUiYiHlFKUKEsDjAF8lE5OTkr/////Sv////9LAHSUYksIhZSMAUOUdJRSlIwNYm91bmRlZF9hYm92ZZRoESiWCAAAAAAAAAABAQEBAQEBAZRoFUsIhZRoGXSUUpSMBl9zaGFwZZRLCIWUjANsb3eUaBEoliAAAAAAAAAAAAC0wgAAtMIAAKDAAACgwNsPScAAAKDAAAAAgAAAAICUaAtLCIWUaBl0lFKUjARoaWdolGgRKJYgAAAAAAAAAAAAtEIAALRCAACgQAAAoEDbD0lAAACgQAAAgD8AAIA/lGgLSwiFlGgZdJRSlIwIbG93X3JlcHKUjFtbLTkwLiAgICAgICAgLTkwLiAgICAgICAgIC01LiAgICAgICAgIC01LiAgICAgICAgIC0zLjE0MTU5MjcgIC01LgogIC0wLiAgICAgICAgIC0wLiAgICAgICBdlIwJaGlnaF9yZXBylIxTWzkwLiAgICAgICAgOTAuICAgICAgICAgNS4gICAgICAgICA1LiAgICAgICAgIDMuMTQxNTkyNyAgNS4KICAxLiAgICAgICAgIDEuICAgICAgIF2UjApfbnBfcmFuZG9tlE51Yi4=", "dtype": "float32", "bounded_below": "[ True True True True True True True True]", "bounded_above": "[ True True True True True True True True]", "_shape": [8], "low": "[-90. -90. -5. -5. -3.1415927 -5.\n -0. -0. ]", "high": "[90. 90. 5. 5. 3.1415927 5.\n 1. 1. ]", "low_repr": "[-90. -90. -5. -5. -3.1415927 -5.\n -0. -0. ]", "high_repr": "[90. 90. 5. 5. 3.1415927 5.\n 1. 1. 
]", "_np_random": null}, "action_space": {":type:": "<class 'gymnasium.spaces.discrete.Discrete'>", ":serialized:": "gAWVqQEAAAAAAACMGWd5bW5hc2l1bS5zcGFjZXMuZGlzY3JldGWUjAhEaXNjcmV0ZZSTlCmBlH2UKIwBbpSMFW51bXB5LmNvcmUubXVsdGlhcnJheZSMBnNjYWxhcpSTlIwFbnVtcHmUjAVkdHlwZZSTlIwCaTiUiYiHlFKUKEsDjAE8lE5OTkr/////Sv////9LAHSUYkMIBAAAAAAAAACUhpRSlIwFc3RhcnSUaAhoDkMIAAAAAAAAAACUhpRSlIwGX3NoYXBllCmMBWR0eXBllGgLjAJpOJSJiIeUUpQoSwNoD05OTkr/////Sv////9LAHSUYowKX25wX3JhbmRvbZSMFG51bXB5LnJhbmRvbS5fcGlja2xllIwQX19nZW5lcmF0b3JfY3RvcpSTlIwFUENHNjSUhZRSlH2UKIwNYml0X2dlbmVyYXRvcpSMBVBDRzY0lIwFc3RhdGWUfZQoaCiKEPom8kQEtrC/zWpdnbaYXBOMA2luY5SKEY23l0D5PtE22CdbF/70wawAdYwKaGFzX3VpbnQzMpRLAIwIdWludGVnZXKUSuvCznp1YnViLg==", "n": "4", "start": "0", "_shape": [], "dtype": "int64", "_np_random": "Generator(PCG64)"}, "n_envs": 1, "lr_schedule": {":type:": "<class 'function'>", ":serialized:": "gAWV6gIAAAAAAACMF2Nsb3VkcGlja2xlLmNsb3VkcGlja2xllIwOX21ha2VfZnVuY3Rpb26Uk5QoaACMDV9idWlsdGluX3R5cGWUk5SMCENvZGVUeXBllIWUUpQoSwFLAEsASwFLAUsTQwiVAZcAiQFTAJROhZQpjAFflIWUjKlDOlxVc2Vyc1xtZVxBcHBEYXRhXExvY2FsXFBhY2thZ2VzXFB5dGhvblNvZnR3YXJlRm91bmRhdGlvbi5QeXRob24uMy4xMV9xYno1bjJrZnJhOHAwXExvY2FsQ2FjaGVcbG9jYWwtcGFja2FnZXNcUHl0aG9uMzExXHNpdGUtcGFja2FnZXNcc3RhYmxlX2Jhc2VsaW5lczNcY29tbW9uXHV0aWxzLnB5lIwEZnVuY5SMGWNvbnN0YW50X2ZuLjxsb2NhbHM+LmZ1bmOUS4RDCPiAANgPEogKlEMAlIwDdmFslIWUKXSUUpR9lCiMC19fcGFja2FnZV9flIwYc3RhYmxlX2Jhc2VsaW5lczMuY29tbW9ulIwIX19uYW1lX1+UjB5zdGFibGVfYmFzZWxpbmVzMy5jb21tb24udXRpbHOUjAhfX2ZpbGVfX5RoDHVOTmgAjBBfbWFrZV9lbXB0eV9jZWxslJOUKVKUhZR0lFKUjBxjbG91ZHBpY2tsZS5jbG91ZHBpY2tsZV9mYXN0lIwSX2Z1bmN0aW9uX3NldHN0YXRllJOUaCB9lH2UKGgYaA2MDF9fcXVhbG5hbWVfX5RoDowPX19hbm5vdGF0aW9uc19flH2UjA5fX2t3ZGVmYXVsdHNfX5ROjAxfX2RlZmF1bHRzX1+UTowKX19tb2R1bGVfX5RoGYwHX19kb2NfX5ROjAtfX2Nsb3N1cmVfX5RoAIwKX21ha2VfY2VsbJSTlEc/dHrhR64Ue4WUUpSFlIwXX2Nsb3VkcGlja2xlX3N1Ym1vZHVsZXOUXZSMC19fZ2xvYmFsc19flH2UdYaUhlIwLg=="}, "batch_norm_stats": [], "batch_norm_stats_target": [], "exploration_schedule": {":type:": "<class 'function'>", ":serialized:": "gAWVxgMAAAAAAACMF2Nsb3VkcGlja2xlLmNsb3VkcGlja2xllIwOX21ha2VfZnVuY3Rpb26Uk5QoaACMDV9idWlsdGluX3R5cGWUk5SMCENvZGVUeXBllIWUUpQoSwFLAEsASwFLBEsTQzyVA5cAZAF8AHoKAACJAmsEAAAAAHICiQFTAIkDZAF8AHoKAACJAYkDegoAAHoFAACJAnoLAAB6AAAAUwCUTksBhpQpjBJwcm9ncmVzc19yZW1haW5pbmeUhZSMqUM6XFVzZXJzXG1lXEFwcERhdGFcTG9jYWxcUGFja2FnZXNcUHl0aG9uU29mdHdhcmVGb3VuZGF0aW9uLlB5dGhvbi4zLjExX3FiejVuMmtmcmE4cDBcTG9jYWxDYWNoZVxsb2NhbC1wYWNrYWdlc1xQeXRob24zMTFcc2l0ZS1wYWNrYWdlc1xzdGFibGVfYmFzZWxpbmVzM1xjb21tb25cdXRpbHMucHmUjARmdW5jlIwbZ2V0X2xpbmVhcl9mbi48bG9jYWxzPi5mdW5jlEtyQzj4gADYDA3QECLRDCKgbNILMtALMtgTFohK4BMYmEHQIDLRHDKwc7hVsXvRG0PAbNEbUtETUtAMUpRDAJSMA2VuZJSMDGVuZF9mcmFjdGlvbpSMBXN0YXJ0lIeUKXSUUpR9lCiMC19fcGFja2FnZV9flIwYc3RhYmxlX2Jhc2VsaW5lczMuY29tbW9ulIwIX19uYW1lX1+UjB5zdGFibGVfYmFzZWxpbmVzMy5jb21tb24udXRpbHOUjAhfX2ZpbGVfX5RoDHVOTmgAjBBfbWFrZV9lbXB0eV9jZWxslJOUKVKUaB4pUpRoHilSlIeUdJRSlIwcY2xvdWRwaWNrbGUuY2xvdWRwaWNrbGVfZmFzdJSMEl9mdW5jdGlvbl9zZXRzdGF0ZZSTlGgkfZR9lChoGmgNjAxfX3F1YWxuYW1lX1+UaA6MD19fYW5ub3RhdGlvbnNfX5R9lChoCowIYnVpbHRpbnOUjAVmbG9hdJSTlIwGcmV0dXJulGgvdYwOX19rd2RlZmF1bHRzX1+UTowMX19kZWZhdWx0c19flE6MCl9fbW9kdWxlX1+UaBuMB19fZG9jX1+UTowLX19jbG9zdXJlX1+UaACMCl9tYWtlX2NlbGyUk5RHP7mZmZmZmZqFlFKUaDdHP8mZmZmZmZqFlFKUaDdHP/AAAAAAAACFlFKUh5SMF19jbG91ZHBpY2tsZV9zdWJtb2R1bGVzlF2UjAtfX2dsb2JhbHNfX5R9lHWGlIZSMC4="}, "system_info": {"OS": "Windows-10-10.0.22621-SP0 10.0.22621", "Python": "3.11.3", "Stable-Baselines3": "2.0.0a5", "PyTorch": "2.0.1+cpu", "GPU Enabled": "False", "Numpy": "1.23.5", "Cloudpickle": "2.2.1", "Gymnasium": "0.28.1", 
"OpenAI Gym": "0.25.2"}}
+ {"policy_class": {":type:": "<class 'abc.ABCMeta'>", ":serialized:": "gAWVMAAAAAAAAACMHnN0YWJsZV9iYXNlbGluZXMzLmRxbi5wb2xpY2llc5SMCURRTlBvbGljeZSTlC4=", "__module__": "stable_baselines3.dqn.policies", "__annotations__": "{'q_net': <class 'stable_baselines3.dqn.policies.QNetwork'>, 'q_net_target': <class 'stable_baselines3.dqn.policies.QNetwork'>}", "__doc__": "\n Policy class with Q-Value Net and target net for DQN\n\n :param observation_space: Observation space\n :param action_space: Action space\n :param lr_schedule: Learning rate schedule (could be constant)\n :param net_arch: The specification of the policy and value networks.\n :param activation_fn: Activation function\n :param features_extractor_class: Features extractor to use.\n :param features_extractor_kwargs: Keyword arguments\n to pass to the features extractor.\n :param normalize_images: Whether to normalize images or not,\n dividing by 255.0 (True by default)\n :param optimizer_class: The optimizer to use,\n ``th.optim.Adam`` by default\n :param optimizer_kwargs: Additional keyword arguments,\n excluding the learning rate, to pass to the optimizer\n ", "__init__": "<function DQNPolicy.__init__ at 0x000001B459681760>", "_build": "<function DQNPolicy._build at 0x000001B459681800>", "make_q_net": "<function DQNPolicy.make_q_net at 0x000001B4596818A0>", "forward": "<function DQNPolicy.forward at 0x000001B459681940>", "_predict": "<function DQNPolicy._predict at 0x000001B4596819E0>", "_get_constructor_parameters": "<function DQNPolicy._get_constructor_parameters at 0x000001B459681A80>", "set_training_mode": "<function DQNPolicy.set_training_mode at 0x000001B459681B20>", "__abstractmethods__": "frozenset()", "_abc_impl": "<_abc._abc_data object at 0x000001B45967AA40>"}, "verbose": 1, "policy_kwargs": {"net_arch": [256, 256]}, "num_timesteps": 5000000, "_total_timesteps": 5000000, "_num_timesteps_at_start": 0, "seed": null, "action_noise": null, "start_time": 1685348723093694800, "learning_rate": 0.005, "tensorboard_log": null, "_last_obs": {":type:": "<class 'numpy.ndarray'>", ":serialized:": "gAWVlQAAAAAAAACMEm51bXB5LmNvcmUubnVtZXJpY5SMC19mcm9tYnVmZmVylJOUKJYgAAAAAAAAAJpOWz3kJaU/CvhjvksTfb7XR38+zep3PgAAAAAAAAAAlIwFbnVtcHmUjAVkdHlwZZSTlIwCZjSUiYiHlFKUKEsDjAE8lE5OTkr/////Sv////9LAHSUYksBSwiGlIwBQ5R0lFKULg=="}, "_last_episode_starts": {":type:": "<class 'numpy.ndarray'>", ":serialized:": "gAWVdAAAAAAAAACMEm51bXB5LmNvcmUubnVtZXJpY5SMC19mcm9tYnVmZmVylJOUKJYBAAAAAAAAAAGUjAVudW1weZSMBWR0eXBllJOUjAJiMZSJiIeUUpQoSwOMAXyUTk5OSv////9K/////0sAdJRiSwGFlIwBQ5R0lFKULg=="}, "_last_original_obs": {":type:": "<class 'numpy.ndarray'>", ":serialized:": "gAWVlQAAAAAAAACMEm51bXB5LmNvcmUubnVtZXJpY5SMC19mcm9tYnVmZmVylJOUKJYgAAAAAAAAADPwYz2n2qU/AjBavlxbYL6A4nI+BlZPPgAAAAAAAAAAlIwFbnVtcHmUjAVkdHlwZZSTlIwCZjSUiYiHlFKUKEsDjAE8lE5OTkr/////Sv////9LAHSUYksBSwiGlIwBQ5R0lFKULg=="}, "_episode_num": 28912, "use_sde": false, "sde_sample_freq": -1, "_current_progress_remaining": 0.0, "_stats_window_size": 100, "ep_info_buffer": {":type:": "<class 'collections.deque'>", ":serialized:": "gAWVIAAAAAAAAACMC2NvbGxlY3Rpb25zlIwFZGVxdWWUk5QpS2SGlFKULg=="}, "ep_success_buffer": {":type:": "<class 'collections.deque'>", ":serialized:": "gAWVIAAAAAAAAACMC2NvbGxlY3Rpb25zlIwFZGVxdWWUk5QpS2SGlFKULg=="}, "_n_updates": 1237500, "buffer_size": 1000000, "batch_size": 256, "learning_starts": 50000, "tau": 1.0, "gamma": 0.99, "gradient_steps": 1, "optimize_memory_usage": false, "replay_buffer_class": {":type:": "<class 'abc.ABCMeta'>", ":serialized:": 
"gAWVNQAAAAAAAACMIHN0YWJsZV9iYXNlbGluZXMzLmNvbW1vbi5idWZmZXJzlIwMUmVwbGF5QnVmZmVylJOULg==", "__module__": "stable_baselines3.common.buffers", "__doc__": "\n Replay buffer used in off-policy algorithms like SAC/TD3.\n\n :param buffer_size: Max number of element in the buffer\n :param observation_space: Observation space\n :param action_space: Action space\n :param device: PyTorch device\n :param n_envs: Number of parallel environments\n :param optimize_memory_usage: Enable a memory efficient variant\n of the replay buffer which reduces by almost a factor two the memory used,\n at a cost of more complexity.\n See https://github.com/DLR-RM/stable-baselines3/issues/37#issuecomment-637501195\n and https://github.com/DLR-RM/stable-baselines3/pull/28#issuecomment-637559274\n Cannot be used in combination with handle_timeout_termination.\n :param handle_timeout_termination: Handle timeout termination (due to timelimit)\n separately and treat the task as infinite horizon task.\n https://github.com/DLR-RM/stable-baselines3/issues/284\n ", "__init__": "<function ReplayBuffer.__init__ at 0x000001B4596593A0>", "add": "<function ReplayBuffer.add at 0x000001B4596594E0>", "sample": "<function ReplayBuffer.sample at 0x000001B459659580>", "_get_samples": "<function ReplayBuffer._get_samples at 0x000001B459659620>", "__abstractmethods__": "frozenset()", "_abc_impl": "<_abc._abc_data object at 0x000001B459292F00>"}, "replay_buffer_kwargs": {}, "train_freq": {":type:": "<class 'stable_baselines3.common.type_aliases.TrainFreq'>", ":serialized:": "gAWVeAAAAAAAAACMJXN0YWJsZV9iYXNlbGluZXMzLmNvbW1vbi50eXBlX2FsaWFzZXOUjAlUcmFpbkZyZXGUk5RLBIwIYnVpbHRpbnOUjAdnZXRhdHRylJOUaACMElRyYWluRnJlcXVlbmN5VW5pdJSTlIwEU1RFUJSGlFKUhpSBlC4="}, "use_sde_at_warmup": false, "exploration_initial_eps": 1.0, "exploration_final_eps": 0.1, "exploration_fraction": 0.2, "target_update_interval": 10000, "_n_calls": 5000000, "max_grad_norm": 10, "exploration_rate": 0.1, "observation_space": {":type:": "<class 'gymnasium.spaces.box.Box'>", ":serialized:": "gAWVdgIAAAAAAACMFGd5bW5hc2l1bS5zcGFjZXMuYm94lIwDQm94lJOUKYGUfZQojAVkdHlwZZSMBW51bXB5lIwFZHR5cGWUk5SMAmY0lImIh5RSlChLA4wBPJROTk5K/////0r/////SwB0lGKMDWJvdW5kZWRfYmVsb3eUjBJudW1weS5jb3JlLm51bWVyaWOUjAtfZnJvbWJ1ZmZlcpSTlCiWCAAAAAAAAAABAQEBAQEBAZRoCIwCYjGUiYiHlFKUKEsDjAF8lE5OTkr/////Sv////9LAHSUYksIhZSMAUOUdJRSlIwNYm91bmRlZF9hYm92ZZRoESiWCAAAAAAAAAABAQEBAQEBAZRoFUsIhZRoGXSUUpSMBl9zaGFwZZRLCIWUjANsb3eUaBEoliAAAAAAAAAAAAC0wgAAtMIAAKDAAACgwNsPScAAAKDAAAAAgAAAAICUaAtLCIWUaBl0lFKUjARoaWdolGgRKJYgAAAAAAAAAAAAtEIAALRCAACgQAAAoEDbD0lAAACgQAAAgD8AAIA/lGgLSwiFlGgZdJRSlIwIbG93X3JlcHKUjFtbLTkwLiAgICAgICAgLTkwLiAgICAgICAgIC01LiAgICAgICAgIC01LiAgICAgICAgIC0zLjE0MTU5MjcgIC01LgogIC0wLiAgICAgICAgIC0wLiAgICAgICBdlIwJaGlnaF9yZXBylIxTWzkwLiAgICAgICAgOTAuICAgICAgICAgNS4gICAgICAgICA1LiAgICAgICAgIDMuMTQxNTkyNyAgNS4KICAxLiAgICAgICAgIDEuICAgICAgIF2UjApfbnBfcmFuZG9tlE51Yi4=", "dtype": "float32", "bounded_below": "[ True True True True True True True True]", "bounded_above": "[ True True True True True True True True]", "_shape": [8], "low": "[-90. -90. -5. -5. -3.1415927 -5.\n -0. -0. ]", "high": "[90. 90. 5. 5. 3.1415927 5.\n 1. 1. ]", "low_repr": "[-90. -90. -5. -5. -3.1415927 -5.\n -0. -0. ]", "high_repr": "[90. 90. 5. 5. 3.1415927 5.\n 1. 1. 
]", "_np_random": null}, "action_space": {":type:": "<class 'gymnasium.spaces.discrete.Discrete'>", ":serialized:": "gAWVrAEAAAAAAACMGWd5bW5hc2l1bS5zcGFjZXMuZGlzY3JldGWUjAhEaXNjcmV0ZZSTlCmBlH2UKIwBbpSMFW51bXB5LmNvcmUubXVsdGlhcnJheZSMBnNjYWxhcpSTlIwFbnVtcHmUjAVkdHlwZZSTlIwCaTiUiYiHlFKUKEsDjAE8lE5OTkr/////Sv////9LAHSUYkMIBAAAAAAAAACUhpRSlIwFc3RhcnSUaAhoDkMIAAAAAAAAAACUhpRSlIwGX3NoYXBllCmMBWR0eXBllGgLjAJpOJSJiIeUUpQoSwNoD05OTkr/////Sv////9LAHSUYowKX25wX3JhbmRvbZSMFG51bXB5LnJhbmRvbS5fcGlja2xllIwQX19nZW5lcmF0b3JfY3RvcpSTlIwFUENHNjSUhZRSlH2UKIwNYml0X2dlbmVyYXRvcpSMBVBDRzY0lIwFc3RhdGWUfZQoaCiKEdavZ4JBAFsIk7RnCJF1wZwAjANpbmOUihF1zTCusWddDJmc+1trjDezAHWMCmhhc191aW50MzKUSwCMCHVpbnRlZ2VylIoFNgAUoQB1YnViLg==", "n": "4", "start": "0", "_shape": [], "dtype": "int64", "_np_random": "Generator(PCG64)"}, "n_envs": 1, "lr_schedule": {":type:": "<class 'function'>", ":serialized:": "gAWV6gIAAAAAAACMF2Nsb3VkcGlja2xlLmNsb3VkcGlja2xllIwOX21ha2VfZnVuY3Rpb26Uk5QoaACMDV9idWlsdGluX3R5cGWUk5SMCENvZGVUeXBllIWUUpQoSwFLAEsASwFLAUsTQwiVAZcAiQFTAJROhZQpjAFflIWUjKlDOlxVc2Vyc1xtZVxBcHBEYXRhXExvY2FsXFBhY2thZ2VzXFB5dGhvblNvZnR3YXJlRm91bmRhdGlvbi5QeXRob24uMy4xMV9xYno1bjJrZnJhOHAwXExvY2FsQ2FjaGVcbG9jYWwtcGFja2FnZXNcUHl0aG9uMzExXHNpdGUtcGFja2FnZXNcc3RhYmxlX2Jhc2VsaW5lczNcY29tbW9uXHV0aWxzLnB5lIwEZnVuY5SMGWNvbnN0YW50X2ZuLjxsb2NhbHM+LmZ1bmOUS4RDCPiAANgPEogKlEMAlIwDdmFslIWUKXSUUpR9lCiMC19fcGFja2FnZV9flIwYc3RhYmxlX2Jhc2VsaW5lczMuY29tbW9ulIwIX19uYW1lX1+UjB5zdGFibGVfYmFzZWxpbmVzMy5jb21tb24udXRpbHOUjAhfX2ZpbGVfX5RoDHVOTmgAjBBfbWFrZV9lbXB0eV9jZWxslJOUKVKUhZR0lFKUjBxjbG91ZHBpY2tsZS5jbG91ZHBpY2tsZV9mYXN0lIwSX2Z1bmN0aW9uX3NldHN0YXRllJOUaCB9lH2UKGgYaA2MDF9fcXVhbG5hbWVfX5RoDowPX19hbm5vdGF0aW9uc19flH2UjA5fX2t3ZGVmYXVsdHNfX5ROjAxfX2RlZmF1bHRzX1+UTowKX19tb2R1bGVfX5RoGYwHX19kb2NfX5ROjAtfX2Nsb3N1cmVfX5RoAIwKX21ha2VfY2VsbJSTlEc/dHrhR64Ue4WUUpSFlIwXX2Nsb3VkcGlja2xlX3N1Ym1vZHVsZXOUXZSMC19fZ2xvYmFsc19flH2UdYaUhlIwLg=="}, "batch_norm_stats": [], "batch_norm_stats_target": [], "exploration_schedule": {":type:": "<class 'function'>", ":serialized:": "gAWVxgMAAAAAAACMF2Nsb3VkcGlja2xlLmNsb3VkcGlja2xllIwOX21ha2VfZnVuY3Rpb26Uk5QoaACMDV9idWlsdGluX3R5cGWUk5SMCENvZGVUeXBllIWUUpQoSwFLAEsASwFLBEsTQzyVA5cAZAF8AHoKAACJAmsEAAAAAHICiQFTAIkDZAF8AHoKAACJAYkDegoAAHoFAACJAnoLAAB6AAAAUwCUTksBhpQpjBJwcm9ncmVzc19yZW1haW5pbmeUhZSMqUM6XFVzZXJzXG1lXEFwcERhdGFcTG9jYWxcUGFja2FnZXNcUHl0aG9uU29mdHdhcmVGb3VuZGF0aW9uLlB5dGhvbi4zLjExX3FiejVuMmtmcmE4cDBcTG9jYWxDYWNoZVxsb2NhbC1wYWNrYWdlc1xQeXRob24zMTFcc2l0ZS1wYWNrYWdlc1xzdGFibGVfYmFzZWxpbmVzM1xjb21tb25cdXRpbHMucHmUjARmdW5jlIwbZ2V0X2xpbmVhcl9mbi48bG9jYWxzPi5mdW5jlEtyQzj4gADYDA3QECLRDCKgbNILMtALMtgTFohK4BMYmEHQIDLRHDKwc7hVsXvRG0PAbNEbUtETUtAMUpRDAJSMA2VuZJSMDGVuZF9mcmFjdGlvbpSMBXN0YXJ0lIeUKXSUUpR9lCiMC19fcGFja2FnZV9flIwYc3RhYmxlX2Jhc2VsaW5lczMuY29tbW9ulIwIX19uYW1lX1+UjB5zdGFibGVfYmFzZWxpbmVzMy5jb21tb24udXRpbHOUjAhfX2ZpbGVfX5RoDHVOTmgAjBBfbWFrZV9lbXB0eV9jZWxslJOUKVKUaB4pUpRoHilSlIeUdJRSlIwcY2xvdWRwaWNrbGUuY2xvdWRwaWNrbGVfZmFzdJSMEl9mdW5jdGlvbl9zZXRzdGF0ZZSTlGgkfZR9lChoGmgNjAxfX3F1YWxuYW1lX1+UaA6MD19fYW5ub3RhdGlvbnNfX5R9lChoCowIYnVpbHRpbnOUjAVmbG9hdJSTlIwGcmV0dXJulGgvdYwOX19rd2RlZmF1bHRzX1+UTowMX19kZWZhdWx0c19flE6MCl9fbW9kdWxlX1+UaBuMB19fZG9jX1+UTowLX19jbG9zdXJlX1+UaACMCl9tYWtlX2NlbGyUk5RHP7mZmZmZmZqFlFKUaDdHP8mZmZmZmZqFlFKUaDdHP/AAAAAAAACFlFKUh5SMF19jbG91ZHBpY2tsZV9zdWJtb2R1bGVzlF2UjAtfX2dsb2JhbHNfX5R9lHWGlIZSMC4="}, "system_info": {"OS": "Windows-10-10.0.22621-SP0 10.0.22621", "Python": "3.11.3", "Stable-Baselines3": "2.0.0a5", "PyTorch": "2.0.1+cpu", "GPU Enabled": "False", "Numpy": "1.23.5", "Cloudpickle": "2.2.1", "Gymnasium": 
"0.28.1", "OpenAI Gym": "0.25.2"}}
ppo-LunarLander-v2.zip CHANGED
@@ -1,3 +1,3 @@
  version https://git-lfs.github.com/spec/v1
- oid sha256:dd2407c821d26c405577d63def05360cfc65c8ea09c3e1e1fbe2b26b916adc3f
- size 570241
+ oid sha256:4c805875f8560ea295e28e6051512dc709753fc9c3228292759e631dc4059f6e
+ size 1127373
ppo-LunarLander-v2/data CHANGED
@@ -5,15 +5,15 @@
  "__module__": "stable_baselines3.dqn.policies",
  "__annotations__": "{'q_net': <class 'stable_baselines3.dqn.policies.QNetwork'>, 'q_net_target': <class 'stable_baselines3.dqn.policies.QNetwork'>}",
  "__doc__": "\n Policy class with Q-Value Net and target net for DQN\n\n :param observation_space: Observation space\n :param action_space: Action space\n :param lr_schedule: Learning rate schedule (could be constant)\n :param net_arch: The specification of the policy and value networks.\n :param activation_fn: Activation function\n :param features_extractor_class: Features extractor to use.\n :param features_extractor_kwargs: Keyword arguments\n to pass to the features extractor.\n :param normalize_images: Whether to normalize images or not,\n dividing by 255.0 (True by default)\n :param optimizer_class: The optimizer to use,\n ``th.optim.Adam`` by default\n :param optimizer_kwargs: Additional keyword arguments,\n excluding the learning rate, to pass to the optimizer\n ",
- "__init__": "<function DQNPolicy.__init__ at 0x000001C9A1381760>",
- "_build": "<function DQNPolicy._build at 0x000001C9A1381800>",
- "make_q_net": "<function DQNPolicy.make_q_net at 0x000001C9A13818A0>",
- "forward": "<function DQNPolicy.forward at 0x000001C9A1381940>",
- "_predict": "<function DQNPolicy._predict at 0x000001C9A13819E0>",
- "_get_constructor_parameters": "<function DQNPolicy._get_constructor_parameters at 0x000001C9A1381A80>",
- "set_training_mode": "<function DQNPolicy.set_training_mode at 0x000001C9A1381B20>",
+ "__init__": "<function DQNPolicy.__init__ at 0x000001B459681760>",
+ "_build": "<function DQNPolicy._build at 0x000001B459681800>",
+ "make_q_net": "<function DQNPolicy.make_q_net at 0x000001B4596818A0>",
+ "forward": "<function DQNPolicy.forward at 0x000001B459681940>",
+ "_predict": "<function DQNPolicy._predict at 0x000001B4596819E0>",
+ "_get_constructor_parameters": "<function DQNPolicy._get_constructor_parameters at 0x000001B459681A80>",
+ "set_training_mode": "<function DQNPolicy.set_training_mode at 0x000001B459681B20>",
  "__abstractmethods__": "frozenset()",
- "_abc_impl": "<_abc._abc_data object at 0x000001C9A137B600>"
+ "_abc_impl": "<_abc._abc_data object at 0x000001B45967AA40>"
  },
  "verbose": 1,
  "policy_kwargs": {
@@ -22,17 +22,17 @@
  256
  ]
  },
- "num_timesteps": 12,
- "_total_timesteps": 10,
+ "num_timesteps": 5000000,
+ "_total_timesteps": 5000000,
  "_num_timesteps_at_start": 0,
  "seed": null,
  "action_noise": null,
- "start_time": 1685348680573278900,
+ "start_time": 1685348723093694800,
  "learning_rate": 0.005,
  "tensorboard_log": null,
  "_last_obs": {
  ":type:": "<class 'numpy.ndarray'>",
- ":serialized:": "gAWVlQAAAAAAAACMEm51bXB5LmNvcmUubnVtZXJpY5SMC19mcm9tYnVmZmVylJOUKJYgAAAAAAAAAM0QbLxBBbs/9VYNvrdRTz37JQk92oLNPQAAAAAAAAAAlIwFbnVtcHmUjAVkdHlwZZSTlIwCZjSUiYiHlFKUKEsDjAE8lE5OTkr/////Sv////9LAHSUYksBSwiGlIwBQ5R0lFKULg=="
+ ":serialized:": "gAWVlQAAAAAAAACMEm51bXB5LmNvcmUubnVtZXJpY5SMC19mcm9tYnVmZmVylJOUKJYgAAAAAAAAAJpOWz3kJaU/CvhjvksTfb7XR38+zep3PgAAAAAAAAAAlIwFbnVtcHmUjAVkdHlwZZSTlIwCZjSUiYiHlFKUKEsDjAE8lE5OTkr/////Sv////9LAHSUYksBSwiGlIwBQ5R0lFKULg=="
  },
  "_last_episode_starts": {
  ":type:": "<class 'numpy.ndarray'>",
@@ -40,12 +40,12 @@
  },
  "_last_original_obs": {
  ":type:": "<class 'numpy.ndarray'>",
- ":serialized:": "gAWVlQAAAAAAAACMEm51bXB5LmNvcmUubnVtZXJpY5SMC19mcm9tYnVmZmVylJOUKJYgAAAAAAAAAABIVrzc37o/iy8DvtupnT28Muk8Lal4PQAAAAAAAAAAlIwFbnVtcHmUjAVkdHlwZZSTlIwCZjSUiYiHlFKUKEsDjAE8lE5OTkr/////Sv////9LAHSUYksBSwiGlIwBQ5R0lFKULg=="
+ ":serialized:": "gAWVlQAAAAAAAACMEm51bXB5LmNvcmUubnVtZXJpY5SMC19mcm9tYnVmZmVylJOUKJYgAAAAAAAAADPwYz2n2qU/AjBavlxbYL6A4nI+BlZPPgAAAAAAAAAAlIwFbnVtcHmUjAVkdHlwZZSTlIwCZjSUiYiHlFKUKEsDjAE8lE5OTkr/////Sv////9LAHSUYksBSwiGlIwBQ5R0lFKULg=="
  },
- "_episode_num": 0,
+ "_episode_num": 28912,
  "use_sde": false,
  "sde_sample_freq": -1,
- "_current_progress_remaining": -0.19999999999999996,
+ "_current_progress_remaining": 0.0,
  "_stats_window_size": 100,
  "ep_info_buffer": {
  ":type:": "<class 'collections.deque'>",
@@ -55,7 +55,7 @@
  ":type:": "<class 'collections.deque'>",
  ":serialized:": "gAWVIAAAAAAAAACMC2NvbGxlY3Rpb25zlIwFZGVxdWWUk5QpS2SGlFKULg=="
  },
- "_n_updates": 0,
+ "_n_updates": 1237500,
  "buffer_size": 1000000,
  "batch_size": 256,
  "learning_starts": 50000,
@@ -68,12 +68,12 @@
  ":serialized:": "gAWVNQAAAAAAAACMIHN0YWJsZV9iYXNlbGluZXMzLmNvbW1vbi5idWZmZXJzlIwMUmVwbGF5QnVmZmVylJOULg==",
  "__module__": "stable_baselines3.common.buffers",
  "__doc__": "\n Replay buffer used in off-policy algorithms like SAC/TD3.\n\n :param buffer_size: Max number of element in the buffer\n :param observation_space: Observation space\n :param action_space: Action space\n :param device: PyTorch device\n :param n_envs: Number of parallel environments\n :param optimize_memory_usage: Enable a memory efficient variant\n of the replay buffer which reduces by almost a factor two the memory used,\n at a cost of more complexity.\n See https://github.com/DLR-RM/stable-baselines3/issues/37#issuecomment-637501195\n and https://github.com/DLR-RM/stable-baselines3/pull/28#issuecomment-637559274\n Cannot be used in combination with handle_timeout_termination.\n :param handle_timeout_termination: Handle timeout termination (due to timelimit)\n separately and treat the task as infinite horizon task.\n https://github.com/DLR-RM/stable-baselines3/issues/284\n ",
- "__init__": "<function ReplayBuffer.__init__ at 0x000001C9A13593A0>",
- "add": "<function ReplayBuffer.add at 0x000001C9A13594E0>",
- "sample": "<function ReplayBuffer.sample at 0x000001C9A1359580>",
- "_get_samples": "<function ReplayBuffer._get_samples at 0x000001C9A1359620>",
+ "__init__": "<function ReplayBuffer.__init__ at 0x000001B4596593A0>",
+ "add": "<function ReplayBuffer.add at 0x000001B4596594E0>",
+ "sample": "<function ReplayBuffer.sample at 0x000001B459659580>",
+ "_get_samples": "<function ReplayBuffer._get_samples at 0x000001B459659620>",
  "__abstractmethods__": "frozenset()",
- "_abc_impl": "<_abc._abc_data object at 0x000001C9A135D080>"
+ "_abc_impl": "<_abc._abc_data object at 0x000001B459292F00>"
  },
  "replay_buffer_kwargs": {},
  "train_freq": {
@@ -85,7 +85,7 @@
  "exploration_final_eps": 0.1,
  "exploration_fraction": 0.2,
  "target_update_interval": 10000,
- "_n_calls": 12,
+ "_n_calls": 5000000,
  "max_grad_norm": 10,
  "exploration_rate": 0.1,
  "observation_space": {
@@ -105,7 +105,7 @@
  },
  "action_space": {
  ":type:": "<class 'gymnasium.spaces.discrete.Discrete'>",
- ":serialized:": "gAWVqQEAAAAAAACMGWd5bW5hc2l1bS5zcGFjZXMuZGlzY3JldGWUjAhEaXNjcmV0ZZSTlCmBlH2UKIwBbpSMFW51bXB5LmNvcmUubXVsdGlhcnJheZSMBnNjYWxhcpSTlIwFbnVtcHmUjAVkdHlwZZSTlIwCaTiUiYiHlFKUKEsDjAE8lE5OTkr/////Sv////9LAHSUYkMIBAAAAAAAAACUhpRSlIwFc3RhcnSUaAhoDkMIAAAAAAAAAACUhpRSlIwGX3NoYXBllCmMBWR0eXBllGgLjAJpOJSJiIeUUpQoSwNoD05OTkr/////Sv////9LAHSUYowKX25wX3JhbmRvbZSMFG51bXB5LnJhbmRvbS5fcGlja2xllIwQX19nZW5lcmF0b3JfY3RvcpSTlIwFUENHNjSUhZRSlH2UKIwNYml0X2dlbmVyYXRvcpSMBVBDRzY0lIwFc3RhdGWUfZQoaCiKEPom8kQEtrC/zWpdnbaYXBOMA2luY5SKEY23l0D5PtE22CdbF/70wawAdYwKaGFzX3VpbnQzMpRLAIwIdWludGVnZXKUSuvCznp1YnViLg==",
+ ":serialized:": "gAWVrAEAAAAAAACMGWd5bW5hc2l1bS5zcGFjZXMuZGlzY3JldGWUjAhEaXNjcmV0ZZSTlCmBlH2UKIwBbpSMFW51bXB5LmNvcmUubXVsdGlhcnJheZSMBnNjYWxhcpSTlIwFbnVtcHmUjAVkdHlwZZSTlIwCaTiUiYiHlFKUKEsDjAE8lE5OTkr/////Sv////9LAHSUYkMIBAAAAAAAAACUhpRSlIwFc3RhcnSUaAhoDkMIAAAAAAAAAACUhpRSlIwGX3NoYXBllCmMBWR0eXBllGgLjAJpOJSJiIeUUpQoSwNoD05OTkr/////Sv////9LAHSUYowKX25wX3JhbmRvbZSMFG51bXB5LnJhbmRvbS5fcGlja2xllIwQX19nZW5lcmF0b3JfY3RvcpSTlIwFUENHNjSUhZRSlH2UKIwNYml0X2dlbmVyYXRvcpSMBVBDRzY0lIwFc3RhdGWUfZQoaCiKEdavZ4JBAFsIk7RnCJF1wZwAjANpbmOUihF1zTCusWddDJmc+1trjDezAHWMCmhhc191aW50MzKUSwCMCHVpbnRlZ2VylIoFNgAUoQB1YnViLg==",
  "n": "4",
  "start": "0",
  "_shape": [],
ppo-LunarLander-v2/policy.optimizer.pth CHANGED
@@ -1,3 +1,3 @@
  version https://git-lfs.github.com/spec/v1
- oid sha256:473ed0d01f93f8ad6cc0da7de807be0915008759e251b7b815442cfedc83e7a8
- size 687
+ oid sha256:a683dbb16f3afcf5270d6269f5cdd5e3cd4e7e1678ca81aee7c7c77155ff290d
+ size 557807
ppo-LunarLander-v2/policy.pth CHANGED
@@ -1,3 +1,3 @@
  version https://git-lfs.github.com/spec/v1
- oid sha256:cd43696ca4c9b0799d5ef010621410229b91586f46be6887b52ca2aff90c581b
+ oid sha256:3c9306d16895a864200352a8849afad6cdce9416136fdb9537aad8127737a342
  size 556929
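These .pth diffs swap only the git-lfs pointers (oid and byte size), not inline binaries. Two details are worth noting: policy.pth keeps exactly the same size, 556929 bytes, consistent with the same [256, 256] network receiving new weights, while policy.optimizer.pth grows from 687 bytes to 557807. The 687-byte file matches a snapshot taken before any gradient step (`"_n_updates": 0` in the old config, since `learning_starts` was never reached), when Adam had no moment buffers yet; after 1237500 updates the optimizer state is roughly the size of the parameters it tracks. The same arithmetic plausibly accounts for ppo-LunarLander-v2.zip roughly doubling from 570241 to 1127373 bytes.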
replay.mp4 CHANGED
Binary files a/replay.mp4 and b/replay.mp4 differ
 
results.json CHANGED
@@ -1 +1 @@
- {"mean_reward": -291.61422386374323, "std_reward": 200.17615757483574, "is_deterministic": true, "n_eval_episodes": 10, "eval_datetime": "2023-05-29T01:24:41.268573"}
+ {"mean_reward": -513.9115460427565, "std_reward": 72.77832856259383, "is_deterministic": true, "n_eval_episodes": 10, "eval_datetime": "2023-05-29T02:50:18.301520"}