ernestum committed on
Commit 4a4d554
1 Parent(s): 853714d

Initial commit

.gitattributes CHANGED
@@ -32,3 +32,4 @@ saved_model/**/* filter=lfs diff=lfs merge=lfs -text
  *.zip filter=lfs diff=lfs merge=lfs -text
  *.zst filter=lfs diff=lfs merge=lfs -text
  *tfevents* filter=lfs diff=lfs merge=lfs -text
+ *.mp4 filter=lfs diff=lfs merge=lfs -text
README.md ADDED
@@ -0,0 +1,76 @@
+ ---
+ library_name: stable-baselines3
+ tags:
+ - seals/CartPole-v0
+ - deep-reinforcement-learning
+ - reinforcement-learning
+ - stable-baselines3
+ model-index:
+ - name: PPO
+   results:
+   - metrics:
+     - type: mean_reward
+       value: 500.00 +/- 0.00
+       name: mean_reward
+     task:
+       type: reinforcement-learning
+       name: reinforcement-learning
+     dataset:
+       name: seals/CartPole-v0
+       type: seals/CartPole-v0
+ ---
+
+ # **PPO** Agent playing **seals/CartPole-v0**
+ This is a trained model of a **PPO** agent playing **seals/CartPole-v0**
+ using the [stable-baselines3 library](https://github.com/DLR-RM/stable-baselines3)
+ and the [RL Zoo](https://github.com/DLR-RM/rl-baselines3-zoo).
+
+ The RL Zoo is a training framework for Stable Baselines3
+ reinforcement learning agents,
+ with hyperparameter optimization and pre-trained agents included.
+
+ ## Usage (with SB3 RL Zoo)
+
+ RL Zoo: https://github.com/DLR-RM/rl-baselines3-zoo<br/>
+ SB3: https://github.com/DLR-RM/stable-baselines3<br/>
+ SB3 Contrib: https://github.com/Stable-Baselines-Team/stable-baselines3-contrib
+
+ ```
+ # Download model and save it into the logs/ folder
+ python -m rl_zoo3.load_from_hub --algo ppo --env seals/CartPole-v0 -orga HumanCompatibleAI -f logs/
+ python enjoy.py --algo ppo --env seals/CartPole-v0 -f logs/
+ ```
+
+ If you installed the RL Zoo3 via pip (`pip install rl_zoo3`), from anywhere you can do:
+ ```
+ python -m rl_zoo3.load_from_hub --algo ppo --env seals/CartPole-v0 -orga HumanCompatibleAI -f logs/
+ rl_zoo3 enjoy --algo ppo --env seals/CartPole-v0 -f logs/
+ ```
+
+ ## Training (with the RL Zoo)
+ ```
+ python train.py --algo ppo --env seals/CartPole-v0 -f logs/
+ # Upload the model and generate video (when possible)
+ python -m rl_zoo3.push_to_hub --algo ppo --env seals/CartPole-v0 -f logs/ -orga HumanCompatibleAI
+ ```
+
+ ## Hyperparameters
+ ```python
+ OrderedDict([('batch_size', 256),
+              ('clip_range', 0.4),
+              ('ent_coef', 0.008508727919228772),
+              ('gae_lambda', 0.9),
+              ('gamma', 0.9999),
+              ('learning_rate', 0.0012403278189645594),
+              ('max_grad_norm', 0.8),
+              ('n_envs', 8),
+              ('n_epochs', 10),
+              ('n_steps', 512),
+              ('n_timesteps', 100000.0),
+              ('policy', 'MlpPolicy'),
+              ('policy_kwargs',
+               {'activation_fn': <class 'torch.nn.modules.activation.ReLU'>,
+                'net_arch': [{'pi': [64, 64], 'vf': [64, 64]}]}),
+              ('vf_coef', 0.489343896591493),
+              ('normalize', False)])
+ ```
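
For reference, the checkpoint can also be loaded directly in Python without the RL Zoo scripts. A minimal sketch (not part of the committed card), assuming `huggingface_sb3` and `seals` are installed and that the repository id matches the `-orga HumanCompatibleAI` commands above:

```python
# Sketch: load the trained PPO checkpoint straight from the Hub with SB3.
# Assumes: pip install stable-baselines3==1.6.2 gym==0.21.0 huggingface_sb3 seals
import gym
import seals  # noqa: F401  (importing seals registers seals/CartPole-v0)
from huggingface_sb3 import load_from_hub
from stable_baselines3 import PPO

checkpoint = load_from_hub(
    repo_id="HumanCompatibleAI/ppo-seals-CartPole-v0",  # assumed from the push command above
    filename="ppo-seals-CartPole-v0.zip",
)
model = PPO.load(checkpoint)

env = gym.make("seals/CartPole-v0")
obs = env.reset()
done, episode_return = False, 0.0
while not done:
    action, _state = model.predict(obs, deterministic=True)
    obs, reward, done, info = env.step(action)
    episode_return += reward
print(f"episode return: {episode_return}")
```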
args.yml ADDED
@@ -0,0 +1,81 @@
+ !!python/object/apply:collections.OrderedDict
+ - - - algo
+     - ppo
+   - - conf_file
+     - hyperparams/python/ppo.py
+   - - device
+     - cpu
+   - - env
+     - seals/CartPole-v0
+   - - env_kwargs
+     - null
+   - - eval_episodes
+     - 5
+   - - eval_freq
+     - 25000
+   - - gym_packages
+     - - seals
+   - - hyperparams
+     - null
+   - - log_folder
+     - logs
+   - - log_interval
+     - -1
+   - - max_total_trials
+     - null
+   - - n_eval_envs
+     - 1
+   - - n_evaluations
+     - null
+   - - n_jobs
+     - 1
+   - - n_startup_trials
+     - 10
+   - - n_timesteps
+     - -1
+   - - n_trials
+     - 500
+   - - no_optim_plots
+     - false
+   - - num_threads
+     - 1
+   - - optimization_log_path
+     - null
+   - - optimize_hyperparameters
+     - false
+   - - progress
+     - false
+   - - pruner
+     - median
+   - - sampler
+     - tpe
+   - - save_freq
+     - -1
+   - - save_replay_buffer
+     - false
+   - - seed
+     - 7
+   - - storage
+     - null
+   - - study_name
+     - null
+   - - tensorboard_log
+     - runs/seals/CartPole-v0__ppo__7__1670516892
+   - - track
+     - true
+   - - trained_agent
+     - ''
+   - - truncate_last_trajectory
+     - true
+   - - uuid
+     - false
+   - - vec_env
+     - dummy
+   - - verbose
+     - 1
+   - - wandb_entity
+     - ernestum
+   - - wandb_project_name
+     - seals-experts-normalized
+   - - yaml_file
+     - null
config.yml ADDED
@@ -0,0 +1,36 @@
+ !!python/object/apply:collections.OrderedDict
+ - - - batch_size
+     - 256
+   - - clip_range
+     - 0.4
+   - - ent_coef
+     - 0.008508727919228772
+   - - gae_lambda
+     - 0.9
+   - - gamma
+     - 0.9999
+   - - learning_rate
+     - 0.0012403278189645594
+   - - max_grad_norm
+     - 0.8
+   - - n_envs
+     - 8
+   - - n_epochs
+     - 10
+   - - n_steps
+     - 512
+   - - n_timesteps
+     - 100000.0
+   - - policy
+     - MlpPolicy
+   - - policy_kwargs
+     - activation_fn: !!python/name:torch.nn.modules.activation.ReLU ''
+       net_arch:
+       - pi:
+         - 64
+         - 64
+         vf:
+         - 64
+         - 64
+   - - vf_coef
+     - 0.489343896591493
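
For orientation, the pairs in config.yml (plus `seed` and `n_envs` from args.yml) correspond roughly to the following direct stable-baselines3 call. This is only a sketch of what the RL Zoo assembles internally, assuming SB3 1.6.x and gym 0.21:

```python
# Sketch: config.yml hyperparameters expressed as a plain SB3 PPO constructor.
import seals  # noqa: F401  (registers seals/CartPole-v0)
import torch as th
from stable_baselines3 import PPO
from stable_baselines3.common.env_util import make_vec_env

env = make_vec_env("seals/CartPole-v0", n_envs=8)  # n_envs from config.yml

model = PPO(
    policy="MlpPolicy",
    env=env,
    batch_size=256,
    clip_range=0.4,
    ent_coef=0.008508727919228772,
    gae_lambda=0.9,
    gamma=0.9999,
    learning_rate=0.0012403278189645594,
    max_grad_norm=0.8,
    n_epochs=10,
    n_steps=512,
    vf_coef=0.489343896591493,
    policy_kwargs=dict(
        activation_fn=th.nn.ReLU,
        net_arch=[dict(pi=[64, 64], vf=[64, 64])],
    ),
    seed=7,  # from args.yml
    verbose=1,
)
model.learn(total_timesteps=100_000)  # n_timesteps from config.yml
```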
env_kwargs.yml ADDED
@@ -0,0 +1 @@
+ {}
ppo-seals-CartPole-v0.zip ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:bdd3c7bf2dd46d22c904d7e34142fcc7334a2600f415fa247e80e88ecc7c9376
+ size 141656
ppo-seals-CartPole-v0/_stable_baselines3_version ADDED
@@ -0,0 +1 @@
+ 1.6.2
ppo-seals-CartPole-v0/data ADDED
@@ -0,0 +1,110 @@
+ {
+ "policy_class": {
+ ":type:": "<class 'abc.ABCMeta'>",
+ ":serialized:": "gAWVOwAAAAAAAACMIXN0YWJsZV9iYXNlbGluZXMzLmNvbW1vbi5wb2xpY2llc5SMEUFjdG9yQ3JpdGljUG9saWN5lJOULg==",
+ "__module__": "stable_baselines3.common.policies",
+ "__doc__": "\n Policy class for actor-critic algorithms (has both policy and value prediction).\n Used by A2C, PPO and the likes.\n\n :param observation_space: Observation space\n :param action_space: Action space\n :param lr_schedule: Learning rate schedule (could be constant)\n :param net_arch: The specification of the policy and value networks.\n :param activation_fn: Activation function\n :param ortho_init: Whether to use or not orthogonal initialization\n :param use_sde: Whether to use State Dependent Exploration or not\n :param log_std_init: Initial value for the log standard deviation\n :param full_std: Whether to use (n_features x n_actions) parameters\n for the std instead of only (n_features,) when using gSDE\n :param sde_net_arch: Network architecture for extracting features\n when using gSDE. If None, the latent features from the policy will be used.\n Pass an empty list to use the states as features.\n :param use_expln: Use ``expln()`` function instead of ``exp()`` to ensure\n a positive standard deviation (cf paper). It allows to keep variance\n above zero and prevent it from growing too fast. In practice, ``exp()`` is usually enough.\n :param squash_output: Whether to squash the output using a tanh function,\n this allows to ensure boundaries when using gSDE.\n :param features_extractor_class: Features extractor to use.\n :param features_extractor_kwargs: Keyword arguments\n to pass to the features extractor.\n :param normalize_images: Whether to normalize images or not,\n dividing by 255.0 (True by default)\n :param optimizer_class: The optimizer to use,\n ``th.optim.Adam`` by default\n :param optimizer_kwargs: Additional keyword arguments,\n excluding the learning rate, to pass to the optimizer\n ",
+ "__init__": "<function ActorCriticPolicy.__init__ at 0x7fe4b5b38700>",
+ "_get_constructor_parameters": "<function ActorCriticPolicy._get_constructor_parameters at 0x7fe4b5b38790>",
+ "reset_noise": "<function ActorCriticPolicy.reset_noise at 0x7fe4b5b38820>",
+ "_build_mlp_extractor": "<function ActorCriticPolicy._build_mlp_extractor at 0x7fe4b5b388b0>",
+ "_build": "<function ActorCriticPolicy._build at 0x7fe4b5b38940>",
+ "forward": "<function ActorCriticPolicy.forward at 0x7fe4b5b389d0>",
+ "_get_action_dist_from_latent": "<function ActorCriticPolicy._get_action_dist_from_latent at 0x7fe4b5b38a60>",
+ "_predict": "<function ActorCriticPolicy._predict at 0x7fe4b5b38af0>",
+ "evaluate_actions": "<function ActorCriticPolicy.evaluate_actions at 0x7fe4b5b38b80>",
+ "get_distribution": "<function ActorCriticPolicy.get_distribution at 0x7fe4b5b38c10>",
+ "predict_values": "<function ActorCriticPolicy.predict_values at 0x7fe4b5b38ca0>",
+ "__abstractmethods__": "frozenset()",
+ "_abc_impl": "<_abc_data object at 0x7fe4b5b2fba0>"
+ },
+ "verbose": 1,
+ "policy_kwargs": {
+ ":type:": "<class 'dict'>",
+ ":serialized:": "gAWVaAAAAAAAAAB9lCiMDWFjdGl2YXRpb25fZm6UjBt0b3JjaC5ubi5tb2R1bGVzLmFjdGl2YXRpb26UjARSZUxVlJOUjAhuZXRfYXJjaJRdlH2UKIwCcGmUXZQoS0BLQGWMAnZmlF2UKEtAS0BldWF1Lg==",
+ "activation_fn": "<class 'torch.nn.modules.activation.ReLU'>",
+ "net_arch": [
+ {
+ "pi": [
+ 64,
+ 64
+ ],
+ "vf": [
+ 64,
+ 64
+ ]
+ }
+ ]
+ },
+ "observation_space": {
+ ":type:": "<class 'gym.spaces.box.Box'>",
+ ":serialized:": "gAWVdwEAAAAAAACMDmd5bS5zcGFjZXMuYm94lIwDQm94lJOUKYGUfZQojAVkdHlwZZSMBW51bXB5lGgFk5SMAmY0lImIh5RSlChLA4wBPJROTk5K/////0r/////SwB0lGKMBl9zaGFwZZRLBIWUjANsb3eUjBJudW1weS5jb3JlLm51bWVyaWOUjAtfZnJvbWJ1ZmZlcpSTlCiWEAAAAAAAAAD//3////9//9sPScD//3//lGgKSwSFlIwBQ5R0lFKUjARoaWdolGgSKJYQAAAAAAAAAP//f3///39/2w9JQP//f3+UaApLBIWUaBV0lFKUjA1ib3VuZGVkX2JlbG93lGgSKJYEAAAAAAAAAAEBAQGUaAeMAmIxlImIh5RSlChLA4wBfJROTk5K/////0r/////SwB0lGJLBIWUaBV0lFKUjA1ib3VuZGVkX2Fib3ZllGgSKJYEAAAAAAAAAAEBAQGUaCFLBIWUaBV0lFKUjApfbnBfcmFuZG9tlE51Yi4=",
+ "dtype": "float32",
+ "_shape": [
+ 4
+ ],
+ "low": "[-3.4028235e+38 -3.4028235e+38 -3.1415927e+00 -3.4028235e+38]",
+ "high": "[3.4028235e+38 3.4028235e+38 3.1415927e+00 3.4028235e+38]",
+ "bounded_below": "[ True True True True]",
+ "bounded_above": "[ True True True True]",
+ "_np_random": null
+ },
+ "action_space": {
+ ":type:": "<class 'gym.spaces.discrete.Discrete'>",
+ ":serialized:": "gAWVLwsAAAAAAACME2d5bS5zcGFjZXMuZGlzY3JldGWUjAhEaXNjcmV0ZZSTlCmBlH2UKIwBbpRLAowGX3NoYXBllCmMBWR0eXBllIwFbnVtcHmUaAeTlIwCaTiUiYiHlFKUKEsDjAE8lE5OTkr/////Sv////9LAHSUYowKX25wX3JhbmRvbZSMFG51bXB5LnJhbmRvbS5fcGlja2xllIwSX19yYW5kb21zdGF0ZV9jdG9ylJOUjAdNVDE5OTM3lIWUUpR9lCiMDWJpdF9nZW5lcmF0b3KUaBOMBXN0YXRllH2UKIwDa2V5lIwSbnVtcHkuY29yZS5udW1lcmljlIwLX2Zyb21idWZmZXKUk5QolsAJAAAAAAAAAAAAgMvWU2I7QWSDdygaT5++KSAATCN90WccEqDtnFV3Y9MyLlpJIkIvbFJbpmpRHJX7SZr4rNIblBqp+ccsvAR6E3PBn61gcJfsxhg3x3PqnSmOqgLqwqIfkWzrz6Tf7NSrm1SSpbmnGNt3g/H87CT3UgGUR8+HrL79sEyK19tohIeSCjwJLlaEiqt73sZ9FJuvWOgJ3QNRV0zMGqSYGeuLv+TZ0EN3+cyUE+2SDHCnC8u+eTF8fJciknY5HW6tbE8ba0T1dJIbNDabvq3Lgzsjaj2J1a7ewVritpsjEYCurZVZpRofQC3nO0jM0C3+YuXA2lZGNYHChc8MyrCvJFQJFlfQ6BPvCeiw5+FaDMxbSonLCcRFZPKJp/lWXhgYkj0swzyhXxNK4EuHqIvN1K+tQ0O3iv/AsSiGk4LYvZONnoPxuDvSFcQq8UdUpcJew8fp98fUiPvPgHKAyO+L/60Vzkqu4hXw9hUKR7cWJQZLV9ChyDHgbsufUNTzdmm58PdxfyD8sb0Fhi30bqDkMwc6vuIXQYXYIgfYVnRcsI8ZmwcYjxg8cQeVZf1hZkd00dKZryQNLb2bz1WRqlWiK1AOV5KEPeonMRrdcwQnEBRqx9ThMDIDdCHWE2moNgX9Lu29fAxYIe//6Duh7KKhp5v80UQ7VD/0ddR8UbUVl4xVW4UneWt9U8AcBpaAgolQEZzIT0q3RzCYbWDwzLzFaawdkttUhKkFiwjDJcESKnKqKDofg5+k4YTJOSO1kagch1NV1JXrfrSfcaUDMG8F27PRTcBF/8idtyoK/uZB9ThefVNaXRCRY/K/zZz7ehTk1GSqe70bQUNaAEdFBYnd+ywk04zhEOrR532pOkyigwWnJdHBJm2VoJlaH9A04Rkw45X+ZhEcpFFBrpzhU/TBs+BgQp/ZZR1WcrWYkGzvojd9ICb8Sm3rD1jGKIuDrtw92SCXLHVCMv5Wm6IbJyVjtfO1ey/FC8aWgnTf7j/rrF5xcXoVpRipjNzX5Pc8VuvLPEPTMynVqU5M40Dr5koIKfSIg6gySM5BKS5t/zKzpo234Ril+u6/qEWjW8BdnXxw/98IhTfAUPlCXWhHyGEOF2hlQA0OG/zY0mmECbWESSFGPFOJzPNOXV/RwcT0jZ8gbecWrVBYrwJfPTHLudSMk+cb17Lwog0aT7oTB0ADU79VLykWKy8iWUGqRcsS0FMHfx4iGcxqht31heDU97MgsQJmlXKdLYP1/I6dXbpgsy/u67kQD9Gc6DjRYf6MdyDVrgymYFM18I6iwSKBIZaf0ZB1AiPK+L9N4eHDVJ1yNvscr5WfwpXg5IhpIJO7E2Rlhbh3xzcWLHo3yj4KLPpwHm2QkjDB3g+Ki/dU2++svSiw4QxzUoXT828rksNjdWXXTOoT6Uc130tGHrmbrfAVnzgUBztr1rkmZmPghFQKId2BR0FkP5MHIsn2tnq7yBXUyyvp+mIJovz5pl0thi5PXl2iiuF6NEbn1rcJ20T5r8gm1lx8gEiHeJss6lg569RDEQPP7gzAjSdS2qGZxEvnyz3QZtpWUXbNsTn+XMObH0/QYw5toa1OjO9wA6j722A5g+vSqeD4H/SOSdR/vl4+m6ZQ8ZjH04RKbGijjsHt0O438qwtwYcmyvBOgEVPUZoJsKm4xIbTANeMWUfW/9en6llQ09Q2GNGYzbPUrXjNPVTzcw7MRMwvfjqL8KSMFxRb41p/8TdxBoLL0Kl8GeA7gzPO76lQg0havYsWjS7Oihv6lY7NdC4fy/ZvTWPeicOus0EdBbVieeskgrHKNAXoP8f/8MT2Kfh5aG69pUbV60nurbRfvbxAQp7QY1LHpt9wdiNBHrmXapt1i9ujL40360NSK5qm9zNaW0EF79jlogPKlKxZI3MMzLpD9a/TxRRQCbc2K+kYySF2kmJWtbyC383WeVDjimpDXpDTPng23MBKrwP+ZU5SynDdFY59LM/DdidhSjcBqa6d0uHGivnMXdui4qFwj1M3fkqCda0ks0pyoNTL9x6ORZZwLL3l6A3sNruF9bVyLi1KSnsE/umc0nRu3EPtaMLzo3ltupslyMhR2aMVjgxE9qykusS4Q9K/wMBItytZsaIg9J9C/SE4w/SSWtq/FT0Tky+uDkGCtR/ESEt0G9ldpIsQTGEO5JXn7MvPOK6elxfmcFxnn5Wegq9zY0lVOaYpuTkeFtSDEJCKDu2AMEjpgXs13GEd7vcDJgl7i3uQaSJWOFaK+l51C6R77PVNOFl/jAHq+Diu+K2PhI6xodlk2ZZNsPYEFyZC+zzkiczrIxx4xx8T0WsCWYZEQMums3oxrEwdQqqbrrPmQSs07U+JfaagHlzslMbmbqiZ/9VXWQjxyfVlGjuM1qxgqeSHyVwvKl6f6DbkpOoYlTnRU9g3C1FENK717sF5/fZbbQMNqsJuaGvpqf57LLRKt/XFPDjHDUB9pbPM1r0TLH5KG74ICd43JrtiaCgNXJxHkYyzKrXQAx/XvilvzBvNLsrDUaoMvlT9ijtcF8dTjTB+U6sP7eAXA0BbuZW+EylZ66meD1auuTfd0MfWNxggoAj3px8RCXRyJkOqxjTgrgUGflH8TjL4mJSsrd9MCQoAqwmN8CrrLff3nlT3YhXUsXXNSI7lqBxXsy2OTNgHlHZmYfLg+67h3KnjOexQhueOod2KqF7EsJNcscbpywzWZmEx66XZre2BVKEbFJnQG0B9GOxbw8T1fGhwRrMuNpprpFRYE7Cir5MNHd7U7JCpO2J9mIKRaeHMdlu8wGONMGAJYjxNKFrJjvc2lFG4yXDmOMxpzYx8jAEFnKRCzlohYowX63r30lQ4KO/+nCK12rJ6Ev2TdjF+bL6tXybdp0eNo+5+MlTDa407XyUrOv6ve6iZTSZvvK9h6M/DFP/JFhv1CmcmlKYYF5oF1chTIAZiG4S+uH/Zn/hw4j9cjKhWCjhViTrooipaq6pyhd1CXpzXXodGrsmlrYX9e47fikDqRsw06oJCUGni5RqsyZWZizGL7GqQlNzsxSRWioTQh0IAEDkhV/Tg3NCJbL6dV7h4idhehjvMc9wki3ApcdPoz3QxY3jHak0bQGkXnRUa+8tsH9zttnNH66piW6P++i1LUHsmpkMLrtOKLDFAW72M
8tpJm8I0A7dSD7VO/llyhZN45fvyOyq/h93CJr+tpGqzKWRJZ2n1OekXsEx5jMtdpWGkxn51Xu206pxf9ZmEONv3kabS+LJblwnigtqyvI/nbvWVzBYR4H38BjJgszCdfpqdEJXN+CD/0cWG1uvD4Eq6VRFAt2+kNUgP7ZeKlGgJjAJ1NJSJiIeUUpQoSwNoDU5OTkr/////Sv////9LAHSUYk1wAoWUjAFDlHSUUpSMA3Bvc5RNcAJ1jAloYXNfZ2F1c3OUSwCMBWdhdXNzlEcAAAAAAAAAAHVidWIu",
+ "n": 2,
+ "_shape": [],
+ "dtype": "int64",
+ "_np_random": "RandomState(MT19937)"
+ },
+ "n_envs": 1,
+ "num_timesteps": 102400,
+ "_total_timesteps": 100000,
+ "_num_timesteps_at_start": 0,
+ "seed": 1,
+ "action_noise": null,
+ "start_time": 1670516894814637504,
+ "learning_rate": {
+ ":type:": "<class 'function'>",
+ ":serialized:": "gAWVhQIAAAAAAACMF2Nsb3VkcGlja2xlLmNsb3VkcGlja2xllIwOX21ha2VfZnVuY3Rpb26Uk5QoaACMDV9idWlsdGluX3R5cGWUk5SMCENvZGVUeXBllIWUUpQoSwFLAEsASwFLAUsTQwSIAFMAlE6FlCmMAV+UhZSMUy9ob21lL21heGltaWxpYW4vdmVudi9saWIvcHl0aG9uMy44L3NpdGUtcGFja2FnZXMvc3RhYmxlX2Jhc2VsaW5lczMvY29tbW9uL3V0aWxzLnB5lIwEZnVuY5RLgEMCAAGUjAN2YWyUhZQpdJRSlH2UKIwLX19wYWNrYWdlX1+UjBhzdGFibGVfYmFzZWxpbmVzMy5jb21tb26UjAhfX25hbWVfX5SMHnN0YWJsZV9iYXNlbGluZXMzLmNvbW1vbi51dGlsc5SMCF9fZmlsZV9flGgMdU5OaACMEF9tYWtlX2VtcHR5X2NlbGyUk5QpUpSFlHSUUpSMHGNsb3VkcGlja2xlLmNsb3VkcGlja2xlX2Zhc3SUjBJfZnVuY3Rpb25fc2V0c3RhdGWUk5RoHn2UfZQoaBZoDYwMX19xdWFsbmFtZV9flIwZY29uc3RhbnRfZm4uPGxvY2Fscz4uZnVuY5SMD19fYW5ub3RhdGlvbnNfX5R9lIwOX19rd2RlZmF1bHRzX1+UTowMX19kZWZhdWx0c19flE6MCl9fbW9kdWxlX1+UaBeMB19fZG9jX1+UTowLX19jbG9zdXJlX1+UaACMCl9tYWtlX2NlbGyUk5RHP1RST9rNKDOFlFKUhZSMF19jbG91ZHBpY2tsZV9zdWJtb2R1bGVzlF2UjAtfX2dsb2JhbHNfX5R9lHWGlIZSMC4="
+ },
+ "tensorboard_log": "runs/seals/CartPole-v0__ppo__7__1670516892/seals-CartPole-v0",
+ "lr_schedule": {
+ ":type:": "<class 'function'>",
+ ":serialized:": "gAWVhQIAAAAAAACMF2Nsb3VkcGlja2xlLmNsb3VkcGlja2xllIwOX21ha2VfZnVuY3Rpb26Uk5QoaACMDV9idWlsdGluX3R5cGWUk5SMCENvZGVUeXBllIWUUpQoSwFLAEsASwFLAUsTQwSIAFMAlE6FlCmMAV+UhZSMUy9ob21lL21heGltaWxpYW4vdmVudi9saWIvcHl0aG9uMy44L3NpdGUtcGFja2FnZXMvc3RhYmxlX2Jhc2VsaW5lczMvY29tbW9uL3V0aWxzLnB5lIwEZnVuY5RLgEMCAAGUjAN2YWyUhZQpdJRSlH2UKIwLX19wYWNrYWdlX1+UjBhzdGFibGVfYmFzZWxpbmVzMy5jb21tb26UjAhfX25hbWVfX5SMHnN0YWJsZV9iYXNlbGluZXMzLmNvbW1vbi51dGlsc5SMCF9fZmlsZV9flGgMdU5OaACMEF9tYWtlX2VtcHR5X2NlbGyUk5QpUpSFlHSUUpSMHGNsb3VkcGlja2xlLmNsb3VkcGlja2xlX2Zhc3SUjBJfZnVuY3Rpb25fc2V0c3RhdGWUk5RoHn2UfZQoaBZoDYwMX19xdWFsbmFtZV9flIwZY29uc3RhbnRfZm4uPGxvY2Fscz4uZnVuY5SMD19fYW5ub3RhdGlvbnNfX5R9lIwOX19rd2RlZmF1bHRzX1+UTowMX19kZWZhdWx0c19flE6MCl9fbW9kdWxlX1+UaBeMB19fZG9jX1+UTowLX19jbG9zdXJlX1+UaACMCl9tYWtlX2NlbGyUk5RHP1RST9rNKDOFlFKUhZSMF19jbG91ZHBpY2tsZV9zdWJtb2R1bGVzlF2UjAtfX2dsb2JhbHNfX5R9lHWGlIZSMC4="
+ },
+ "_last_obs": null,
+ "_last_episode_starts": {
+ ":type:": "<class 'numpy.ndarray'>",
+ ":serialized:": "gAWVewAAAAAAAACMEm51bXB5LmNvcmUubnVtZXJpY5SMC19mcm9tYnVmZmVylJOUKJYIAAAAAAAAAAAAAAAAAAAAlIwFbnVtcHmUjAVkdHlwZZSTlIwCYjGUiYiHlFKUKEsDjAF8lE5OTkr/////Sv////9LAHSUYksIhZSMAUOUdJRSlC4="
+ },
+ "_last_original_obs": null,
+ "_episode_num": 0,
+ "use_sde": false,
+ "sde_sample_freq": -1,
+ "_current_progress_remaining": -0.02400000000000002,
+ "ep_info_buffer": {
+ ":type:": "<class 'collections.deque'>",
+ ":serialized:": "gAWVRAwAAAAAAACMC2NvbGxlY3Rpb25zlIwFZGVxdWWUk5QpS2SGlFKUKH2UKIwBcpRHQH9AAAAAAACMAWyUTfQBjAF0lEdAMZfC66J66nV9lChoBkdAf0AAAAAAAGgHTfQBaAhHQDGXhvR7Z391fZQoaAZHQH9AAAAAAABoB030AWgIR0Axl0r9VFQVdX2UKGgGR0B/QAAAAAAAaAdN9AFoCEdAMZcAFPi1iXV9lChoBkdAf0AAAAAAAGgHTfQBaAhHQDLI36yjYZl1fZQoaAZHQH9AAAAAAABoB030AWgIR0AyyJ7sv7FbdX2UKGgGR0B/QAAAAAAAaAdN9AFoCEdAMshHkLhJiHV9lChoBkdAf0AAAAAAAGgHTfQBaAhHQDLH8XN1QqJ1fZQoaAZHQH9AAAAAAABoB030AWgIR0Ayx7KJVKf4dX2UKGgGR0B/QAAAAAAAaAdN9AFoCEdAMsd1p0wJxHV9lChoBkdAf0AAAAAAAGgHTfQBaAhHQDLHOW0JF9d1fZQoaAZHQH9AAAAAAABoB030AWgIR0Ayxu4gA6uGdX2UKGgGR0B/QAAAAAAAaAdN9AFoCEdAM/rXxvvSdHV9lChoBkdAf0AAAAAAAGgHTfQBaAhHQDP6mHgxagV1fZQoaAZHQH9AAAAAAABoB030AWgIR0Az+kIX0oSddX2UKGgGR0B/QAAAAAAAaAdN9AFoCEdAM/ntWuHN5nV9lChoBkdAf0AAAAAAAGgHTfQBaAhHQDP5r1uivgZ1fZQoaAZHQH9AAAAAAABoB030AWgIR0Az+XQtz0YkdX2UKGgGR0B/QAAAAAAAaAdN9AFoCEdAM/k43m3fAXV9lChoBkdAf0AAAAAAAGgHTfQBaAhHQDP47nxJ/Xp1fZQoaAZHQH9AAAAAAABoB030AWgIR0A1J6RyOq//dX2UKGgGR0B/QAAAAAAAaAdN9AFoCEdANSdfTkQwsXV9lChoBkdAf0AAAAAAAGgHTfQBaAhHQDUnCN0eU6h1fZQoaAZHQH9AAAAAAABoB030AWgIR0A1JrQPZqVRdX2UKGgGR0B/QAAAAAAAaAdN9AFoCEdANSZ3s5XEInV9lChoBkdAf0AAAAAAAGgHTfQBaAhHQDUmPaL4vex1fZQoaAZHQH9AAAAAAABoB030AWgIR0A1JgRsdkrgdX2UKGgGR0B/QAAAAAAAaAdN9AFoCEdANSW7voePrHV9lChoBkdAf0AAAAAAAGgHTfQBaAhHQDZcFeOXE611fZQoaAZHQH9AAAAAAABoB030AWgIR0A2W9VWCEpRdX2UKGgGR0B/QAAAAAAAaAdN9AFoCEdANlt92HLzPXV9lChoBkdAf0AAAAAAAGgHTfQBaAhHQDZbKDCgsbx1fZQoaAZHQH9AAAAAAABoB030AWgIR0A2WunuRcNZdX2UKGgGR0B/QAAAAAAAaAdN9AFoCEdANlqtknTiKnV9lChoBkdAf0AAAAAAAGgHTfQBaAhHQDZacVgx8D11fZQoaAZHQH9AAAAAAABoB030AWgIR0A2WiYsunMudX2UKGgGR0B/QAAAAAAAaAdN9AFoCEdAN5hNZeRgZ3V9lChoBkdAf0AAAAAAAGgHTfQBaAhHQDeYCcPOIIp1fZQoaAZHQH9AAAAAAABoB030AWgIR0A3l7Qb+98JdX2UKGgGR0B/QAAAAAAAaAdN9AFoCEdAN5deIEbHZXV9lChoBkdAf0AAAAAAAGgHTfQBaAhHQDeXH2h7E511fZQoaAZHQH9AAAAAAABoB030AWgIR0A3luM+/xlQdX2UKGgGR0B/QAAAAAAAaAdN9AFoCEdAN5anJkoWpXV9lChoBkdAf0AAAAAAAGgHTfQBaAhHQDeWW8h9srN1fZQoaAZHQH9AAAAAAABoB030AWgIR0A6KNmDlHSXdX2UKGgGR0B/QAAAAAAAaAdN9AFoCEdAOiiYPXkHU3V9lChoBkdAf0AAAAAAAGgHTfQBaAhHQDooQTVUdaN1fZQoaAZHQH9AAAAAAABoB030AWgIR0A6J+x4Y77sdX2UKGgGR0B/QAAAAAAAaAdN9AFoCEdAOiewcHWz4XV9lChoBkdAf0AAAAAAAGgHTfQBaAhHQDondadMCcR1fZQoaAZHQH9AAAAAAABoB030AWgIR0A6JzqKP4mDdX2UKGgGR0B/QAAAAAAAaAdN9AFoCEdAOibwvxpco3V9lChoBkdAf0AAAAAAAGgHTfQBaAhHQDtZEuxrzoV1fZQoaAZHQH9AAAAAAABoB030AWgIR0A7WM23rleXdX2UKGgGR0B/QAAAAAAAaAdN9AFoCEdAO1h3u/k/8nV9lChoBkdAf0AAAAAAAGgHTfQBaAhHQDtYIcBEKE51fZQoaAZHQH9AAAAAAABoB030AWgIR0A7V+MIeHSGdX2UKGgGR0B/QAAAAAAAaAdN9AFoCEdAO1emR/3Fk3V9lChoBkdAf0AAAAAAAGgHTfQBaAhHQDtXaews5GV1fZQoaAZHQH9AAAAAAABoB030AWgIR0A7Vx6v7m+1dX2UKGgGR0B/QAAAAAAAaAdN9AFoCEdAPH9Jvo/zKHV9lChoBkdAf0AAAAAAAGgHTfQBaAhHQDx/BJqZc9p1fZQoaAZHQH9AAAAAAABoB030AWgIR0A8fq3EyckMdX2UKGgGR0B/QAAAAAAAaAdN9AFoCEdAPH5XyRSxaHV9lChoBkdAf0AAAAAAAGgHTfQBaAhHQDx+GQCCBf91fZQoaAZHQH9AAAAAAABoB030AWgIR0A8fdxQzk6tdX2UKGgGR0B/QAAAAAAAaAdN9AFoCEdAPH2f5DZ13nV9lChoBkdAf0AAAAAAAGgHTfQBaAhHQDx9VJcxCY11fZQoaAZHQH9AAAAAAABoB030AWgIR0A9rQKrq+rVdX2UKGgGR0B/QAAAAAAAaAdN9AFoCEdAPazB2wFC9nV9lChoBkdAf0AAAAAAAGgHTfQBaAhHQD2sa5wwTM91fZQoaAZHQH9AAAAAAABoB030AWgIR0A9rBY3eenRdX2UKGgGR0B/QAAAAAAAaAdN9AFoCEdAPavYODrZ8XV9lChoBkdAf0AAAAAAAGgHTfQBaAhHQD2rnHNorWl1fZQoaAZHQH9AAAAAAABoB030AWgIR0A9q2Cdz4lAdX2UKGgGR0B/QAAAAAAAaAdN9AFoCEdAPasV+I/JNnV9lChoBkdAf0AAAAAAAGgHTfQBaAhHQD7iBd2PkrB1fZQoaAZHQH9AAAAAAABoB030AWgIR0A+4cDr7fpEdX2UKGgGR0B/QAAAAAAAaAdN9AFoCEdAPuFqWTot+XV9lChoBkdAf0AAAAAAAGgHTfQBaAhHQD7hFG5MDfZ1fZQoaAZHQH9AAAAAAABoB030AWgIR0A+4NWluWKNdX2UKGgGR0B/QAAAAAAAaAdN9AFoCEdAPuCY5T6zmnV9lChoBkdAf0AAAAAAAGgHTfQBaAhHQD7gXGff4yp1fZQoaAZHQH9AAAAAAABoB030AWgIR0A+4BGx2SuA
dX2UKGgGR0B/QAAAAAAAaAdN9AFoCEdAQA0sH0K7ZnV9lChoBkdAf0AAAAAAAGgHTfQBaAhHQEANDTBqKxd1fZQoaAZHQH9AAAAAAABoB030AWgIR0BADOOsDGLldX2UKGgGR0B/QAAAAAAAaAdN9AFoCEdAQAy66J66a3V9lChoBkdAf0AAAAAAAGgHTfQBaAhHQEAMna37UG51fZQoaAZHQH9AAAAAAABoB030AWgIR0BADIFvAGjcdX2UKGgGR0B/QAAAAAAAaAdN9AFoCEdAQAxlg+hXbXV9lChoBkdAf0AAAAAAAGgHTfQBaAhHQEAMQfZElVt1fZQoaAZHQH9AAAAAAABoB030AWgIR0BArgpKBd2QdX2UKGgGR0B/QAAAAAAAaAdN9AFoCEdAQK3pD/lyR3V9lChoBkdAf0AAAAAAAGgHTfQBaAhHQECtvybx3FF1fZQoaAZHQH9AAAAAAABoB030AWgIR0BArZX2dupCdX2UKGgGR0B/QAAAAAAAaAdN9AFoCEdAQK13jdYW+HV9lChoBkdAf0AAAAAAAGgHTfQBaAhHQECtWWhRIjJ1fZQoaAZHQH9AAAAAAABoB030AWgIR0BArTtb9qDcdX2UKGgGR0B/QAAAAAAAaAdN9AFoCEdAQK0VvddmhHVlLg=="
+ },
+ "ep_success_buffer": {
+ ":type:": "<class 'collections.deque'>",
+ ":serialized:": "gAWVIAAAAAAAAACMC2NvbGxlY3Rpb25zlIwFZGVxdWWUk5QpS2SGlFKULg=="
+ },
+ "_n_updates": 250,
+ "n_steps": 512,
+ "gamma": 0.9999,
+ "gae_lambda": 0.9,
+ "ent_coef": 0.008508727919228772,
+ "vf_coef": 0.489343896591493,
+ "max_grad_norm": 0.8,
+ "batch_size": 256,
+ "n_epochs": 10,
+ "clip_range": {
+ ":type:": "<class 'function'>",
+ ":serialized:": "gAWVhQIAAAAAAACMF2Nsb3VkcGlja2xlLmNsb3VkcGlja2xllIwOX21ha2VfZnVuY3Rpb26Uk5QoaACMDV9idWlsdGluX3R5cGWUk5SMCENvZGVUeXBllIWUUpQoSwFLAEsASwFLAUsTQwSIAFMAlE6FlCmMAV+UhZSMUy9ob21lL21heGltaWxpYW4vdmVudi9saWIvcHl0aG9uMy44L3NpdGUtcGFja2FnZXMvc3RhYmxlX2Jhc2VsaW5lczMvY29tbW9uL3V0aWxzLnB5lIwEZnVuY5RLgEMCAAGUjAN2YWyUhZQpdJRSlH2UKIwLX19wYWNrYWdlX1+UjBhzdGFibGVfYmFzZWxpbmVzMy5jb21tb26UjAhfX25hbWVfX5SMHnN0YWJsZV9iYXNlbGluZXMzLmNvbW1vbi51dGlsc5SMCF9fZmlsZV9flGgMdU5OaACMEF9tYWtlX2VtcHR5X2NlbGyUk5QpUpSFlHSUUpSMHGNsb3VkcGlja2xlLmNsb3VkcGlja2xlX2Zhc3SUjBJfZnVuY3Rpb25fc2V0c3RhdGWUk5RoHn2UfZQoaBZoDYwMX19xdWFsbmFtZV9flIwZY29uc3RhbnRfZm4uPGxvY2Fscz4uZnVuY5SMD19fYW5ub3RhdGlvbnNfX5R9lIwOX19rd2RlZmF1bHRzX1+UTowMX19kZWZhdWx0c19flE6MCl9fbW9kdWxlX1+UaBeMB19fZG9jX1+UTowLX19jbG9zdXJlX1+UaACMCl9tYWtlX2NlbGyUk5RHP9mZmZmZmZqFlFKUhZSMF19jbG91ZHBpY2tsZV9zdWJtb2R1bGVzlF2UjAtfX2dsb2JhbHNfX5R9lHWGlIZSMC4="
+ },
+ "clip_range_vf": null,
+ "normalize_advantage": true,
+ "target_kl": null
+ }
ppo-seals-CartPole-v0/policy.optimizer.pth ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:5f8f95042d63d71329a83889b244855b119ff1fde42470577638bb8a7472455e
+ size 82425
ppo-seals-CartPole-v0/policy.pth ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:385b42f25c52b68029dd9c47cf3b1b779c3ec4a4441606218bacd6091fa5bb6f
+ size 40513
ppo-seals-CartPole-v0/pytorch_variables.pth ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:d030ad8db708280fcae77d87e973102039acd23a11bdecc3db8eb6c0ac940ee1
+ size 431
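
The `data` JSON above, together with policy.pth, policy.optimizer.pth and pytorch_variables.pth, are the entries stable-baselines3 packs into ppo-seals-CartPole-v0.zip. A sketch for inspecting them without instantiating PPO, assuming the archive follows the standard SB3 save format:

```python
# Sketch: peek inside the SB3 model archive (data JSON + torch state dicts).
from stable_baselines3.common.save_util import load_from_zip_file

data, params, pytorch_variables = load_from_zip_file("ppo-seals-CartPole-v0.zip")

print(data["gamma"], data["n_steps"])            # attributes from the `data` entry
print(list(params["policy"].keys())[:5])         # policy.pth state_dict keys
print(list(params["policy.optimizer"].keys()))   # policy.optimizer.pth contents
```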
ppo-seals-CartPole-v0/system_info.txt ADDED
@@ -0,0 +1,7 @@
+ OS: Linux-5.4.0-125-generic-x86_64-with-glibc2.29 #141-Ubuntu SMP Wed Aug 10 13:42:03 UTC 2022
+ Python: 3.8.10
+ Stable-Baselines3: 1.6.2
+ PyTorch: 1.11.0+cu102
+ GPU Enabled: False
+ Numpy: 1.22.3
+ Gym: 0.21.0
replay.mp4 ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:4b7051380170d8c8c4a86101d6def7c0b8950e420b0bbd4775b96c1e170313aa
+ size 60768
results.json ADDED
@@ -0,0 +1 @@
+ {"mean_reward": 500.0, "std_reward": 0.0, "is_deterministic": true, "n_eval_episodes": 10, "eval_datetime": "2022-12-29T14:39:42.298440"}
train_eval_metrics.zip ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:913814bce3741222e544117345a87cf5fd970bd301ff0985679b10763887659e
+ size 6702