ernestum committed
Commit: d4cc9d9
Parent: 644fe50

Initial commit

.gitattributes CHANGED
@@ -33,3 +33,4 @@ saved_model/**/* filter=lfs diff=lfs merge=lfs -text
  *.zip filter=lfs diff=lfs merge=lfs -text
  *.zst filter=lfs diff=lfs merge=lfs -text
  *tfevents* filter=lfs diff=lfs merge=lfs -text
+ *.mp4 filter=lfs diff=lfs merge=lfs -text
README.md ADDED
@@ -0,0 +1,79 @@
+ ---
+ library_name: stable-baselines3
+ tags:
+ - seals/Ant-v1
+ - deep-reinforcement-learning
+ - reinforcement-learning
+ - stable-baselines3
+ model-index:
+ - name: SAC
+   results:
+   - task:
+       type: reinforcement-learning
+       name: reinforcement-learning
+     dataset:
+       name: seals/Ant-v1
+       type: seals/Ant-v1
+     metrics:
+     - type: mean_reward
+       value: 1004.15 +/- 26.60
+       name: mean_reward
+       verified: false
+ ---
+
+ # **SAC** Agent playing **seals/Ant-v1**
+ This is a trained model of a **SAC** agent playing **seals/Ant-v1**
+ using the [stable-baselines3 library](https://github.com/DLR-RM/stable-baselines3)
+ and the [RL Zoo](https://github.com/DLR-RM/rl-baselines3-zoo).
+
+ The RL Zoo is a training framework for Stable Baselines3
+ reinforcement learning agents,
+ with hyperparameter optimization and pre-trained agents included.
+
+ ## Usage (with SB3 RL Zoo)
+
+ RL Zoo: https://github.com/DLR-RM/rl-baselines3-zoo<br/>
+ SB3: https://github.com/DLR-RM/stable-baselines3<br/>
+ SB3 Contrib: https://github.com/Stable-Baselines-Team/stable-baselines3-contrib
+
+ Install the RL Zoo (with SB3 and SB3-Contrib):
+ ```bash
+ pip install rl_zoo3
+ ```
+
+ ```bash
+ # Download model and save it into the logs/ folder
+ python -m rl_zoo3.load_from_hub --algo sac --env seals/Ant-v1 -orga ernestum -f logs/
+ python -m rl_zoo3.enjoy --algo sac --env seals/Ant-v1 -f logs/
+ ```
+
+ If you installed RL Zoo3 via pip (`pip install rl_zoo3`), you can run the following from anywhere:
+ ```bash
+ python -m rl_zoo3.load_from_hub --algo sac --env seals/Ant-v1 -orga ernestum -f logs/
+ python -m rl_zoo3.enjoy --algo sac --env seals/Ant-v1 -f logs/
+ ```
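+
+ Alternatively, the trained policy can be loaded directly with stable-baselines3, without the RL Zoo CLI. The snippet below is a minimal sketch: it assumes the `sac-seals-Ant-v1.zip` archive from this repository has been downloaded into the working directory and that importing `seals` registers the `seals/Ant-v1` Gymnasium environment; the rollout loop itself is illustrative.
+ ```python
+ import gymnasium as gym
+ import seals  # noqa: F401  # assumed to register the seals/Ant-v1 environment
+ from stable_baselines3 import SAC
+
+ # "sac-seals-Ant-v1.zip" is the model archive stored in this repository;
+ # fetch it locally first (adjust the path if you keep it elsewhere).
+ model = SAC.load("sac-seals-Ant-v1.zip")
+
+ env = gym.make("seals/Ant-v1")
+ obs, _ = env.reset()
+ done, total_reward = False, 0.0
+ while not done:
+     action, _ = model.predict(obs, deterministic=True)
+     obs, reward, terminated, truncated, _ = env.step(action)
+     total_reward += float(reward)
+     done = terminated or truncated
+ print(f"episode return: {total_reward:.2f}")
+ ```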
+
+ ## Training (with the RL Zoo)
+ ```bash
+ python -m rl_zoo3.train --algo sac --env seals/Ant-v1 -f logs/
+ # Upload the model and generate video (when possible)
+ python -m rl_zoo3.push_to_hub --algo sac --env seals/Ant-v1 -f logs/ -orga ernestum
+ ```
+
+ ## Hyperparameters
+ ```python
+ OrderedDict([('batch_size', 512),
+              ('buffer_size', 1000000),
+              ('gamma', 0.98),
+              ('learning_rate', 0.0018514039303149058),
+              ('learning_starts', 1000),
+              ('n_timesteps', 1000000.0),
+              ('policy', 'MlpPolicy'),
+              ('policy_kwargs',
+               {'log_std_init': -2.2692589009754176,
+                'net_arch': [256, 256],
+                'use_sde': False}),
+              ('tau', 0.05),
+              ('train_freq', 64),
+              ('normalize', False)])
+ ```
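+
+ For reference, this is roughly how the settings above map onto a plain stable-baselines3 training run outside the RL Zoo. It is a sketch only: the environment registration via `seals`, the save path, and the lack of evaluation callbacks are assumptions, while the numeric values and the `n_timesteps` budget come from the hyperparameters listed above.
+ ```python
+ import gymnasium as gym
+ import seals  # noqa: F401  # assumed to register the seals/Ant-v1 environment
+ from stable_baselines3 import SAC
+
+ env = gym.make("seals/Ant-v1")
+ model = SAC(
+     "MlpPolicy",
+     env,
+     batch_size=512,
+     buffer_size=1_000_000,
+     gamma=0.98,
+     learning_rate=0.0018514039303149058,
+     learning_starts=1000,
+     tau=0.05,
+     train_freq=64,
+     policy_kwargs=dict(
+         log_std_init=-2.2692589009754176,
+         net_arch=[256, 256],
+         use_sde=False,
+     ),
+     verbose=1,
+ )
+ model.learn(total_timesteps=1_000_000)  # n_timesteps from the hyperparameters above
+ model.save("sac-seals-Ant-v1")
+ ```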
args.yml ADDED
@@ -0,0 +1,81 @@
+ !!python/object/apply:collections.OrderedDict
+ - - - algo
+     - sac
+   - - conf_file
+     - hyperparams/python/sac.py
+   - - device
+     - cpu
+   - - env
+     - seals/Ant-v1
+   - - env_kwargs
+     - null
+   - - eval_episodes
+     - 0
+   - - eval_freq
+     - 25000
+   - - gym_packages
+     - - seals
+   - - hyperparams
+     - null
+   - - log_folder
+     - gymnasium_models
+   - - log_interval
+     - -1
+   - - max_total_trials
+     - null
+   - - n_eval_envs
+     - 1
+   - - n_evaluations
+     - null
+   - - n_jobs
+     - 1
+   - - n_startup_trials
+     - 10
+   - - n_timesteps
+     - -1
+   - - n_trials
+     - 500
+   - - no_optim_plots
+     - false
+   - - num_threads
+     - 4
+   - - optimization_log_path
+     - null
+   - - optimize_hyperparameters
+     - false
+   - - progress
+     - false
+   - - pruner
+     - median
+   - - sampler
+     - tpe
+   - - save_freq
+     - -1
+   - - save_replay_buffer
+     - false
+   - - seed
+     - 339003414
+   - - storage
+     - null
+   - - study_name
+     - null
+   - - tensorboard_log
+     - ''
+   - - track
+     - false
+   - - trained_agent
+     - ''
+   - - truncate_last_trajectory
+     - true
+   - - uuid
+     - false
+   - - vec_env
+     - dummy
+   - - verbose
+     - 1
+   - - wandb_entity
+     - null
+   - - wandb_project_name
+     - sb3
+   - - wandb_tags
+     - []
config.yml ADDED
@@ -0,0 +1,25 @@
+ !!python/object/apply:collections.OrderedDict
+ - - - batch_size
+     - 512
+   - - buffer_size
+     - 1000000
+   - - gamma
+     - 0.98
+   - - learning_rate
+     - 0.0018514039303149058
+   - - learning_starts
+     - 1000
+   - - n_timesteps
+     - 1000000.0
+   - - policy
+     - MlpPolicy
+   - - policy_kwargs
+     - log_std_init: -2.2692589009754176
+       net_arch:
+       - 256
+       - 256
+       use_sde: false
+   - - tau
+     - 0.05
+   - - train_freq
+     - 64
env_kwargs.yml ADDED
@@ -0,0 +1 @@
+ {}
results.json ADDED
@@ -0,0 +1 @@
+ {"mean_reward": 1004.1500469999999, "std_reward": 26.598463652396816, "is_deterministic": true, "n_eval_episodes": 10, "eval_datetime": "2023-09-15T13:54:20.973826"}
sac-seals-Ant-v1.zip ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:875d090ecd5d25b9bc78d82d72ec3b2ab367bd57776237a14da800934c383d0c
+ size 3402061
sac-seals-Ant-v1/_stable_baselines3_version ADDED
@@ -0,0 +1 @@
+ 2.1.0
sac-seals-Ant-v1/actor.optimizer.pth ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:4e09680857f0154f1cf9c65dd9485bd3989d6bb12999445f6e740275897e74e2
+ size 626845
sac-seals-Ant-v1/critic.optimizer.pth ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:6afa47b5c64a31315b2d74419c08310eedbf1dd25a4968eb6fc95d008d1235e2
+ size 1221625
sac-seals-Ant-v1/data ADDED
@@ -0,0 +1,129 @@
1
+ {
2
+ "policy_class": {
3
+ ":type:": "<class 'abc.ABCMeta'>",
4
+ ":serialized:": "gAWVMAAAAAAAAACMHnN0YWJsZV9iYXNlbGluZXMzLnNhYy5wb2xpY2llc5SMCVNBQ1BvbGljeZSTlC4=",
5
+ "__module__": "stable_baselines3.sac.policies",
6
+ "__annotations__": "{'actor': <class 'stable_baselines3.sac.policies.Actor'>, 'critic': <class 'stable_baselines3.common.policies.ContinuousCritic'>, 'critic_target': <class 'stable_baselines3.common.policies.ContinuousCritic'>}",
7
+ "__doc__": "\n Policy class (with both actor and critic) for SAC.\n\n :param observation_space: Observation space\n :param action_space: Action space\n :param lr_schedule: Learning rate schedule (could be constant)\n :param net_arch: The specification of the policy and value networks.\n :param activation_fn: Activation function\n :param use_sde: Whether to use State Dependent Exploration or not\n :param log_std_init: Initial value for the log standard deviation\n :param use_expln: Use ``expln()`` function instead of ``exp()`` when using gSDE to ensure\n a positive standard deviation (cf paper). It allows to keep variance\n above zero and prevent it from growing too fast. In practice, ``exp()`` is usually enough.\n :param clip_mean: Clip the mean output when using gSDE to avoid numerical instability.\n :param features_extractor_class: Features extractor to use.\n :param features_extractor_kwargs: Keyword arguments\n to pass to the features extractor.\n :param normalize_images: Whether to normalize images or not,\n dividing by 255.0 (True by default)\n :param optimizer_class: The optimizer to use,\n ``th.optim.Adam`` by default\n :param optimizer_kwargs: Additional keyword arguments,\n excluding the learning rate, to pass to the optimizer\n :param n_critics: Number of critic networks to create.\n :param share_features_extractor: Whether to share or not the features extractor\n between the actor and the critic (this saves computation time)\n ",
8
+ "__init__": "<function SACPolicy.__init__ at 0x7fcd4a7918b0>",
9
+ "_build": "<function SACPolicy._build at 0x7fcd4a791940>",
10
+ "_get_constructor_parameters": "<function SACPolicy._get_constructor_parameters at 0x7fcd4a7919d0>",
11
+ "reset_noise": "<function SACPolicy.reset_noise at 0x7fcd4a791a60>",
12
+ "make_actor": "<function SACPolicy.make_actor at 0x7fcd4a791af0>",
13
+ "make_critic": "<function SACPolicy.make_critic at 0x7fcd4a791b80>",
14
+ "forward": "<function SACPolicy.forward at 0x7fcd4a791c10>",
15
+ "_predict": "<function SACPolicy._predict at 0x7fcd4a791ca0>",
16
+ "set_training_mode": "<function SACPolicy.set_training_mode at 0x7fcd4a791d30>",
17
+ "__abstractmethods__": "frozenset()",
18
+ "_abc_impl": "<_abc_data object at 0x7fcd4a7928d0>"
19
+ },
20
+ "verbose": 1,
21
+ "policy_kwargs": {
22
+ "net_arch": [
23
+ 256,
24
+ 256
25
+ ],
26
+ "log_std_init": -2.2692589009754176,
27
+ "use_sde": false
28
+ },
29
+ "num_timesteps": 1000000,
30
+ "_total_timesteps": 1000000,
31
+ "_num_timesteps_at_start": 0,
32
+ "seed": 0,
33
+ "action_noise": null,
34
+ "start_time": 1694771152660236841,
35
+ "learning_rate": {
36
+ ":type:": "<class 'function'>",
37
+ ":serialized:": "gAWVlwIAAAAAAACMF2Nsb3VkcGlja2xlLmNsb3VkcGlja2xllIwOX21ha2VfZnVuY3Rpb26Uk5QoaACMDV9idWlsdGluX3R5cGWUk5SMCENvZGVUeXBllIWUUpQoSwFLAEsASwFLAUsTQwSIAFMAlE6FlCmMAV+UhZSMZS9ob21lL21heGltaWxpYW4vcmwtYmFzZWxpbmVzMy16b28vdmVudi9saWIvcHl0aG9uMy44L3NpdGUtcGFja2FnZXMvc3RhYmxlX2Jhc2VsaW5lczMvY29tbW9uL3V0aWxzLnB5lIwEZnVuY5RLg0MCAAGUjAN2YWyUhZQpdJRSlH2UKIwLX19wYWNrYWdlX1+UjBhzdGFibGVfYmFzZWxpbmVzMy5jb21tb26UjAhfX25hbWVfX5SMHnN0YWJsZV9iYXNlbGluZXMzLmNvbW1vbi51dGlsc5SMCF9fZmlsZV9flGgMdU5OaACMEF9tYWtlX2VtcHR5X2NlbGyUk5QpUpSFlHSUUpSMHGNsb3VkcGlja2xlLmNsb3VkcGlja2xlX2Zhc3SUjBJfZnVuY3Rpb25fc2V0c3RhdGWUk5RoHn2UfZQoaBZoDYwMX19xdWFsbmFtZV9flIwZY29uc3RhbnRfZm4uPGxvY2Fscz4uZnVuY5SMD19fYW5ub3RhdGlvbnNfX5R9lIwOX19rd2RlZmF1bHRzX1+UTowMX19kZWZhdWx0c19flE6MCl9fbW9kdWxlX1+UaBeMB19fZG9jX1+UTowLX19jbG9zdXJlX1+UaACMCl9tYWtlX2NlbGyUk5RHP15VWdVF2e2FlFKUhZSMF19jbG91ZHBpY2tsZV9zdWJtb2R1bGVzlF2UjAtfX2dsb2JhbHNfX5R9lHWGlIZSMC4="
38
+ },
39
+ "tensorboard_log": null,
40
+ "_last_obs": null,
41
+ "_last_episode_starts": {
42
+ ":type:": "<class 'numpy.ndarray'>",
43
+ ":serialized:": "gAWVdAAAAAAAAACMEm51bXB5LmNvcmUubnVtZXJpY5SMC19mcm9tYnVmZmVylJOUKJYBAAAAAAAAAAGUjAVudW1weZSMBWR0eXBllJOUjAJiMZSJiIeUUpQoSwOMAXyUTk5OSv////9K/////0sAdJRiSwGFlIwBQ5R0lFKULg=="
44
+ },
45
+ "_last_original_obs": {
46
+ ":type:": "<class 'numpy.ndarray'>",
47
+ ":serialized:": "gAWVXQEAAAAAAACMEm51bXB5LmNvcmUubnVtZXJpY5SMC19mcm9tYnVmZmVylJOUKJboAAAAAAAAAHVlbsbbNAhA68iY6Ew36L9VeLMk5J7QP3bw+ulWItE/w8SwpKp61z9AupfMbjjsP6yjY88nMsC//ubG/vIi4D/1RcKIIa/gP140oTgByOC/Z06M9g048L8w9u+TRarRvyfym871jfO/7NsB3jtc2r8c8KtSLsfwP22mq1OKLbE/Mhfcgcq/tb843zXt5hY1P6pcHN43b8I/4WW6W3iD2z/wqRKMENvUP1epU4PDoui/RAkywICxsj81XKYDU3UcP2w7PEiHzdk/AOu8S6H1xj+AM0lDs6+BP2QYxZvo/7I/VIEqVhnw67+UjAVudW1weZSMBWR0eXBllJOUjAJmOJSJiIeUUpQoSwOMATyUTk5OSv////9K/////0sAdJRiSwFLHYaUjAFDlHSUUpQu"
48
+ },
49
+ "_episode_num": 1000,
50
+ "use_sde": false,
51
+ "sde_sample_freq": -1,
52
+ "_current_progress_remaining": 0.0,
53
+ "_stats_window_size": 100,
54
+ "ep_info_buffer": {
55
+ ":type:": "<class 'collections.deque'>",
56
+ ":serialized:": "gAWVRAwAAAAAAACMC2NvbGxlY3Rpb25zlIwFZGVxdWWUk5QpS2SGlFKUKH2UKIwBcpRHQI1E4Xdj5KyMAWyUTegDjAF0lEdAjKMky1uzhXV9lChoBkdAi11reZXuE2gHTegDaAhHQIyrIK2KEWZ1fZQoaAZHQI2597Uoa1loB03oA2gIR0CMs3w6QvHtdX2UKGgGR0CNj8J53TuwaAdN6ANoCEdAjLuSro4dZXV9lChoBkdAjH3PvSc9XGgHTegDaAhHQIzD87W/ag51fZQoaAZHQI3e3oRqXWxoB03oA2gIR0CMzEf29L6DdX2UKGgGR0CN49QkX1rZaAdN6ANoCEdAjNRsolUp/nV9lChoBkdAkCObIYFaCGgHTegDaAhHQIzclQ66reZ1fZQoaAZHQI+9b4+KTB9oB03oA2gIR0CM5L7VJ+UhdX2UKGgGR0CNyp+6y0KJaAdN6ANoCEdAjOzgxBVuJnV9lChoBkdAjYv0U47zTWgHTegDaAhHQIz1HFDOTq11fZQoaAZHQIbslIGyHEdoB03oA2gIR0CM/UoR7JGOdX2UKGgGR0CNg44I8hcJaAdN6ANoCEdAjQWSteUpu3V9lChoBkdAjm/qjJuEVWgHTegDaAhHQI0Nqjk+5e91fZQoaAZHQI1A8/MW43FoB03oA2gIR0CNFatkFwDOdX2UKGgGR0CNYFFnZkCnaAdN6ANoCEdAjR4BM8HObHV9lChoBkdAjkdxXGOuJWgHTegDaAhHQI0mWaz/p+t1fZQoaAZHQI3BeUB4lhRoB03oA2gIR0CNLqTjebd8dX2UKGgGR0CNPJ9AHE/CaAdN6ANoCEdAjTcPLowEhnV9lChoBkdAj089ic5Ke2gHTegDaAhHQI0/V9lVcUx1fZQoaAZHQI9zY1rIo3JoB03oA2gIR0CNR6/Yao/BdX2UKGgGR0CKXtgH/tIDaAdN6ANoCEdAjVAT4DcM3XV9lChoBkdAjQmKS5iEx2gHTegDaAhHQI1YHI4lyBF1fZQoaAZHQI9vNdE9dNZoB03oA2gIR0CNYB9zfaYedX2UKGgGR0COsHJmNBGAaAdN6ANoCEdAjWhIKD0163V9lChoBkdAjy4vMbFS9GgHTegDaAhHQI1waL876pJ1fZQoaAZHQI1TbnX/YJ5oB03oA2gIR0CNeM14Pf8/dX2UKGgGR0CO+enJDE3saAdN6ANoCEdAjYDJY1YQrnV9lChoBkdAjwn2Nm16V2gHTegDaAhHQI2IxVsDW9V1fZQoaAZHQIz40brC3w1oB03oA2gIR0CNkOx/ustDdX2UKGgGR0CPjan1FpfyaAdN6ANoCEdAjZjC6xxDLXV9lChoBkdAjyoVjiGWU2gHTegDaAhHQI2g7h99c8l1fZQoaAZHQI1XhvBJqZdoB03oA2gIR0CNqNlpXZGsdX2UKGgGR0COe6oiLVFyaAdN6ANoCEdAjbC/2bobGXV9lChoBkdAjveKP4mCy2gHTegDaAhHQI24zOX3QD51fZQoaAZHQI5gv2TPjXFoB03oA2gIR0CNwNd0JWvKdX2UKGgGR0CPxsp3HJcPaAdN6ANoCEdAjcjw7DEWI3V9lChoBkdAkJztNFjNIWgHTegDaAhHQI3RFWluWKN1fZQoaAZHQIzARXbM5fdoB03oA2gIR0CN2TJpWV/udX2UKGgGR0CLSxBkZrHmaAdN6ANoCEdAjeFQ3YL9dnV9lChoBkdAjp2AhKUVz2gHTegDaAhHQI3pS9ytFKF1fZQoaAZHQI9Opsyi22JoB03oA2gIR0CN8VIKc/dJdX2UKGgGR0CNaVXpW3jNaAdN6ANoCEdAjflaF23az3V9lChoBkdAjTQLwnYxtmgHTegDaAhHQI4BmVVxS511fZQoaAZHQI8NPc+JP69oB03oA2gIR0COCa7YkE9udX2UKGgGR0COy87Wd3B6aAdN6ANoCEdAjhIOEVWS2nV9lChoBkdAjsAY3FUADWgHTegDaAhHQI4aJq20AtF1fZQoaAZHQIysz67/XGxoB03oA2gIR0COIidc0LtvdX2UKGgGR0CP9u5Gz8gqaAdN6ANoCEdAjiow482aUnV9lChoBkdAi96vO6d1+2gHTegDaAhHQI4x8th/iHZ1fZQoaAZHQI2LkM3IdU9oB03oA2gIR0COOdOtW+49dX2UKGgGR0CIrHTDwYtQaAdN6ANoCEdAjkGXR5TqB3V9lChoBkdAj9MddNWU8mgHTegDaAhHQI5JbPWxyGV1fZQoaAZHQI/Eoh0Qsf9oB03oA2gIR0COUX0YCQtBdX2UKGgGR0COLfMEidJ8aAdN6ANoCEdAjlmrNfPX1HV9lChoBkdAi6YFmFrVOWgHTegDaAhHQI5itIK+i8F1fZQoaAZHQI4qbTQVsUJoB03oA2gIR0COaxcB2fTTdX2UKGgGR0CMHu2606YFaAdN6ANoCEdAjnMyEUTL4nV9lChoBkdAi4NwiiZfD2gHTegDaAhHQI57Ov8qFyt1fZQoaAZHQJAed4ptrKxoB03oA2gIR0COg1ShrWRSdX2UKGgGR0CO+6dU83dcaAdN6ANoCEdAjoumGmDUVnV9lChoBkdAjmAVfeDWb2gHTegDaAhHQI6T+hdt2s91fZQoaAZHQI00vY8Md95oB03oA2gIR0COnEOkLx7RdX2UKGgGR0CPV5uNPxhEaAdN6ANoCEdAjqSDVhCtzXV9lChoBkdAjovMPJ7swGgHTegDaAhHQI6sdR3u/lB1fZQoaAZHQI732/gzguRoB03oA2gIR0COtLP+n62wdX2UKGgGR0CNtJBX0XgtaAdN6ANoCEdAjrzq1og3cnV9lChoBkdAjclHcDbJwWgHTegDaAhHQI7E6Skj5bh1fZQoaAZHQI4V0GRmseZoB03oA2gIR0COzQHvc8DCdX2UKGgGR0COxvs+FDfFaAdN6ANoCEdAjtUQjt5UtXV9lChoBkdAje3S6MBIWmgHTegDaAhHQI7ctDc/MW51fZQoaAZHQIvk79GZuyhoB03oA2gIR0CO5Mw9q1w6dX2UKGgGR0CQa1/8l5WzaAdN6ANoCEdAju0MEaESNHV9lChoBkdAja0qeCkGimgHTegDaAhHQI71BrN4Z/F1fZQoaAZHQI7hZ60IC2doB03oA2gIR0CO/U87p3X7dX2UKGgGR0CQfdDkELYxaAdN6ANoCEdAjwVcBEKE4HV9lChoBkdAkDQHQ6ZH/mgHTegDaAhHQI8NgEjgQ6J1fZQoaAZHQI3i+oaUA1hoB03oA2gIR0CPFjptaY/ndX2UKGgGR0COiucBEKE4aAdN6ANoCEdAjx4HH/95yHV9lChoBkdAi/qZpBX0XmgHTegDaAhHQI8l7YbsF+x1fZQoaAZHQI5xR8c+7lJoB03oA2gIR0CPLeDU3GXHdX2UKGgGR0CMRrVGTcIraAdN6ANoCEdAjzXchTwUg3V9lChoBkdAjge1dxAB1mgHTegDaAhHQI8+C5sj3VV1fZQoaAZHQIlv9vn8sMBoB03oA2gIR0CPRgTUy57P
dX2UKGgGR0CMz1/7zkIYaAdN6ANoCEdAj05I8ZDRdHV9lChoBkdAj67TrE9+w2gHTegDaAhHQI9Wb0Dlo111fZQoaAZHQIyJsZNwiq1oB03oA2gIR0CPXl/tIClrdX2UKGgGR0COSjFcY64laAdN6ANoCEdAj2ZgQpWmxnV9lChoBkdAilMM1jy4F2gHTegDaAhHQI9uQg/1QIl1fZQoaAZHQI7O9WCEpRZoB03oA2gIR0CPdmUj9n9OdX2UKGgGR0CPq1yWAwwkaAdN6ANoCEdAj36HX/YJ3XV9lChoBkdAj21TRYzSC2gHTegDaAhHQI+Ge4I8hcJ1fZQoaAZHQI3pZlpXZGtoB03oA2gIR0CPjoaWHDaXdX2UKGgGR0COL6P/7zkIaAdN6ANoCEdAj5arzoUzsXV9lChoBkdAjPYpiiItUWgHTegDaAhHQI+et4qwyIp1fZQoaAZHQIrCi6OHWSVoB03oA2gIR0CPpyn/kvK2dX2UKGgGR0COJYNrj5sTaAdN6ANoCEdAj69s90RvnHV9lChoBkdAj8dZR0lqrWgHTegDaAhHQI+3jG3nZCh1fZQoaAZHQI0eePeYUnJoB03oA2gIR0CPv6pn6EamdX2UKGgGR0COTVGOuJUHaAdN6ANoCEdAj8eixu89OnVlLg=="
57
+ },
58
+ "ep_success_buffer": {
59
+ ":type:": "<class 'collections.deque'>",
60
+ ":serialized:": "gAWVIAAAAAAAAACMC2NvbGxlY3Rpb25zlIwFZGVxdWWUk5QpS2SGlFKULg=="
61
+ },
62
+ "_n_updates": 15610,
63
+ "observation_space": {
64
+ ":type:": "<class 'gymnasium.spaces.box.Box'>",
65
+ ":serialized:": "gAWVgwMAAAAAAACMFGd5bW5hc2l1bS5zcGFjZXMuYm94lIwDQm94lJOUKYGUfZQojAVkdHlwZZSMBW51bXB5lGgFk5SMAmY4lImIh5RSlChLA4wBPJROTk5K/////0r/////SwB0lGKMDWJvdW5kZWRfYmVsb3eUjBJudW1weS5jb3JlLm51bWVyaWOUjAtfZnJvbWJ1ZmZlcpSTlCiWHQAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAJRoB4wCYjGUiYiHlFKUKEsDjAF8lE5OTkr/////Sv////9LAHSUYksdhZSMAUOUdJRSlIwNYm91bmRlZF9hYm92ZZRoECiWHQAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAJRoFEsdhZRoGHSUUpSMBl9zaGFwZZRLHYWUjANsb3eUaBAolugAAAAAAAAAAAAAAAAA8P8AAAAAAADw/wAAAAAAAPD/AAAAAAAA8P8AAAAAAADw/wAAAAAAAPD/AAAAAAAA8P8AAAAAAADw/wAAAAAAAPD/AAAAAAAA8P8AAAAAAADw/wAAAAAAAPD/AAAAAAAA8P8AAAAAAADw/wAAAAAAAPD/AAAAAAAA8P8AAAAAAADw/wAAAAAAAPD/AAAAAAAA8P8AAAAAAADw/wAAAAAAAPD/AAAAAAAA8P8AAAAAAADw/wAAAAAAAPD/AAAAAAAA8P8AAAAAAADw/wAAAAAAAPD/AAAAAAAA8P8AAAAAAADw/5RoCksdhZRoGHSUUpSMBGhpZ2iUaBAolugAAAAAAAAAAAAAAAAA8H8AAAAAAADwfwAAAAAAAPB/AAAAAAAA8H8AAAAAAADwfwAAAAAAAPB/AAAAAAAA8H8AAAAAAADwfwAAAAAAAPB/AAAAAAAA8H8AAAAAAADwfwAAAAAAAPB/AAAAAAAA8H8AAAAAAADwfwAAAAAAAPB/AAAAAAAA8H8AAAAAAADwfwAAAAAAAPB/AAAAAAAA8H8AAAAAAADwfwAAAAAAAPB/AAAAAAAA8H8AAAAAAADwfwAAAAAAAPB/AAAAAAAA8H8AAAAAAADwfwAAAAAAAPB/AAAAAAAA8H8AAAAAAADwf5RoCksdhZRoGHSUUpSMCGxvd19yZXBylIwELWluZpSMCWhpZ2hfcmVwcpSMA2luZpSMCl9ucF9yYW5kb22UTnViLg==",
66
+ "dtype": "float64",
67
+ "bounded_below": "[False False False False False False False False False False False False\n False False False False False False False False False False False False\n False False False False False]",
68
+ "bounded_above": "[False False False False False False False False False False False False\n False False False False False False False False False False False False\n False False False False False]",
69
+ "_shape": [
70
+ 29
71
+ ],
72
+ "low": "[-inf -inf -inf -inf -inf -inf -inf -inf -inf -inf -inf -inf -inf -inf\n -inf -inf -inf -inf -inf -inf -inf -inf -inf -inf -inf -inf -inf -inf\n -inf]",
73
+ "high": "[inf inf inf inf inf inf inf inf inf inf inf inf inf inf inf inf inf inf\n inf inf inf inf inf inf inf inf inf inf inf]",
74
+ "low_repr": "-inf",
75
+ "high_repr": "inf",
76
+ "_np_random": null
77
+ },
78
+ "action_space": {
79
+ ":type:": "<class 'gymnasium.spaces.box.Box'>",
80
+ ":serialized:": "gAWVjAIAAAAAAACMFGd5bW5hc2l1bS5zcGFjZXMuYm94lIwDQm94lJOUKYGUfZQojAVkdHlwZZSMBW51bXB5lGgFk5SMAmY0lImIh5RSlChLA4wBPJROTk5K/////0r/////SwB0lGKMDWJvdW5kZWRfYmVsb3eUjBJudW1weS5jb3JlLm51bWVyaWOUjAtfZnJvbWJ1ZmZlcpSTlCiWCAAAAAAAAAABAQEBAQEBAZRoB4wCYjGUiYiHlFKUKEsDjAF8lE5OTkr/////Sv////9LAHSUYksIhZSMAUOUdJRSlIwNYm91bmRlZF9hYm92ZZRoECiWCAAAAAAAAAABAQEBAQEBAZRoFEsIhZRoGHSUUpSMBl9zaGFwZZRLCIWUjANsb3eUaBAoliAAAAAAAAAAAACAvwAAgL8AAIC/AACAvwAAgL8AAIC/AACAvwAAgL+UaApLCIWUaBh0lFKUjARoaWdolGgQKJYgAAAAAAAAAAAAgD8AAIA/AACAPwAAgD8AAIA/AACAPwAAgD8AAIA/lGgKSwiFlGgYdJRSlIwIbG93X3JlcHKUjAQtMS4wlIwJaGlnaF9yZXBylIwDMS4wlIwKX25wX3JhbmRvbZSMFG51bXB5LnJhbmRvbS5fcGlja2xllIwQX19nZW5lcmF0b3JfY3RvcpSTlIwFUENHNjSUaDGMFF9fYml0X2dlbmVyYXRvcl9jdG9ylJOUhpRSlH2UKIwNYml0X2dlbmVyYXRvcpSMBVBDRzY0lIwFc3RhdGWUfZQoaDyKEONhlaa3XlgJLUWWWTS1oRqMA2luY5SKEKlzeES8M4FYghr3OtvajUF1jApoYXNfdWludDMylEsAjAh1aW50ZWdlcpRLAHVidWIu",
81
+ "dtype": "float32",
82
+ "bounded_below": "[ True True True True True True True True]",
83
+ "bounded_above": "[ True True True True True True True True]",
84
+ "_shape": [
85
+ 8
86
+ ],
87
+ "low": "[-1. -1. -1. -1. -1. -1. -1. -1.]",
88
+ "high": "[1. 1. 1. 1. 1. 1. 1. 1.]",
89
+ "low_repr": "-1.0",
90
+ "high_repr": "1.0",
91
+ "_np_random": "Generator(PCG64)"
92
+ },
93
+ "n_envs": 1,
94
+ "buffer_size": 1,
95
+ "batch_size": 512,
96
+ "learning_starts": 1000,
97
+ "tau": 0.05,
98
+ "gamma": 0.98,
99
+ "gradient_steps": 1,
100
+ "optimize_memory_usage": false,
101
+ "replay_buffer_class": {
102
+ ":type:": "<class 'abc.ABCMeta'>",
103
+ ":serialized:": "gAWVNQAAAAAAAACMIHN0YWJsZV9iYXNlbGluZXMzLmNvbW1vbi5idWZmZXJzlIwMUmVwbGF5QnVmZmVylJOULg==",
104
+ "__module__": "stable_baselines3.common.buffers",
105
+ "__doc__": "\n Replay buffer used in off-policy algorithms like SAC/TD3.\n\n :param buffer_size: Max number of element in the buffer\n :param observation_space: Observation space\n :param action_space: Action space\n :param device: PyTorch device\n :param n_envs: Number of parallel environments\n :param optimize_memory_usage: Enable a memory efficient variant\n of the replay buffer which reduces by almost a factor two the memory used,\n at a cost of more complexity.\n See https://github.com/DLR-RM/stable-baselines3/issues/37#issuecomment-637501195\n and https://github.com/DLR-RM/stable-baselines3/pull/28#issuecomment-637559274\n Cannot be used in combination with handle_timeout_termination.\n :param handle_timeout_termination: Handle timeout termination (due to timelimit)\n separately and treat the task as infinite horizon task.\n https://github.com/DLR-RM/stable-baselines3/issues/284\n ",
106
+ "__init__": "<function ReplayBuffer.__init__ at 0x7fcd4a75b820>",
107
+ "add": "<function ReplayBuffer.add at 0x7fcd4a75b8b0>",
108
+ "sample": "<function ReplayBuffer.sample at 0x7fcd4a75b940>",
109
+ "_get_samples": "<function ReplayBuffer._get_samples at 0x7fcd4a75b9d0>",
110
+ "_maybe_cast_dtype": "<staticmethod object at 0x7fcd4a7615e0>",
111
+ "__abstractmethods__": "frozenset()",
112
+ "_abc_impl": "<_abc_data object at 0x7fcd4a761600>"
113
+ },
114
+ "replay_buffer_kwargs": {},
115
+ "train_freq": {
116
+ ":type:": "<class 'stable_baselines3.common.type_aliases.TrainFreq'>",
117
+ ":serialized:": "gAWVYQAAAAAAAACMJXN0YWJsZV9iYXNlbGluZXMzLmNvbW1vbi50eXBlX2FsaWFzZXOUjAlUcmFpbkZyZXGUk5RLQGgAjBJUcmFpbkZyZXF1ZW5jeVVuaXSUk5SMBHN0ZXCUhZRSlIaUgZQu"
118
+ },
119
+ "use_sde_at_warmup": false,
120
+ "target_entropy": -8.0,
121
+ "ent_coef": "auto",
122
+ "target_update_interval": 1,
123
+ "lr_schedule": {
124
+ ":type:": "<class 'function'>",
125
+ ":serialized:": "gAWVlwIAAAAAAACMF2Nsb3VkcGlja2xlLmNsb3VkcGlja2xllIwOX21ha2VfZnVuY3Rpb26Uk5QoaACMDV9idWlsdGluX3R5cGWUk5SMCENvZGVUeXBllIWUUpQoSwFLAEsASwFLAUsTQwSIAFMAlE6FlCmMAV+UhZSMZS9ob21lL21heGltaWxpYW4vcmwtYmFzZWxpbmVzMy16b28vdmVudi9saWIvcHl0aG9uMy44L3NpdGUtcGFja2FnZXMvc3RhYmxlX2Jhc2VsaW5lczMvY29tbW9uL3V0aWxzLnB5lIwEZnVuY5RLg0MCAAGUjAN2YWyUhZQpdJRSlH2UKIwLX19wYWNrYWdlX1+UjBhzdGFibGVfYmFzZWxpbmVzMy5jb21tb26UjAhfX25hbWVfX5SMHnN0YWJsZV9iYXNlbGluZXMzLmNvbW1vbi51dGlsc5SMCF9fZmlsZV9flGgMdU5OaACMEF9tYWtlX2VtcHR5X2NlbGyUk5QpUpSFlHSUUpSMHGNsb3VkcGlja2xlLmNsb3VkcGlja2xlX2Zhc3SUjBJfZnVuY3Rpb25fc2V0c3RhdGWUk5RoHn2UfZQoaBZoDYwMX19xdWFsbmFtZV9flIwZY29uc3RhbnRfZm4uPGxvY2Fscz4uZnVuY5SMD19fYW5ub3RhdGlvbnNfX5R9lIwOX19rd2RlZmF1bHRzX1+UTowMX19kZWZhdWx0c19flE6MCl9fbW9kdWxlX1+UaBeMB19fZG9jX1+UTowLX19jbG9zdXJlX1+UaACMCl9tYWtlX2NlbGyUk5RHP15VWdVF2e2FlFKUhZSMF19jbG91ZHBpY2tsZV9zdWJtb2R1bGVzlF2UjAtfX2dsb2JhbHNfX5R9lHWGlIZSMC4="
126
+ },
127
+ "batch_norm_stats": [],
128
+ "batch_norm_stats_target": []
129
+ }
sac-seals-Ant-v1/ent_coef_optimizer.pth ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:6188361c13d2ce04327d1ed75d95229a15f4d7cd6420552793706998cbe00d96
+ size 1507
sac-seals-Ant-v1/policy.pth ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:a082101a2a842f84130d6bd17cc93e477490b979c9403189310809adac7e8976
+ size 1533253
sac-seals-Ant-v1/pytorch_variables.pth ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:08ec2892a29f7f53a942ba6225998a5b666a9ee2ce7c1923d59a24785964308c
+ size 747
sac-seals-Ant-v1/system_info.txt ADDED
@@ -0,0 +1,9 @@
+ - OS: Linux-5.4.0-156-generic-x86_64-with-glibc2.29 # 173-Ubuntu SMP Tue Jul 11 07:25:22 UTC 2023
+ - Python: 3.8.10
+ - Stable-Baselines3: 2.1.0
+ - PyTorch: 2.0.1+cu117
+ - GPU Enabled: False
+ - Numpy: 1.24.4
+ - Cloudpickle: 2.2.1
+ - Gymnasium: 0.29.1
+ - OpenAI Gym: 0.21.0
train_eval_metrics.zip ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:a0e43661297925c75d1f133dc5aaeaf10c89148d5aad4614db0df232fbddbdee
+ size 29198