ernestum committed
Commit 81f5fbe
1 Parent(s): d4cc9d9

Initial commit

README.md CHANGED
@@ -77,3 +77,8 @@ OrderedDict([('batch_size', 512),
  ('train_freq', 64),
  ('normalize', False)])
  ```
+
+ # Environment Arguments
+ ```python
+ {'render_mode': 'rgb_array'}
+ ```
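The new `# Environment Arguments` section records the kwargs the environment was created with. A minimal sketch of how they would be applied, assuming the `seals` package is installed and registers the `seals/Ant-v1` Gymnasium environment (the env ID is inferred from the repo name, not stated in this commit):

```python
import gymnasium as gym
import seals  # noqa: F401  # assumed: registers the seals/Ant-v1 environment

# The kwargs from the README / env_kwargs.yml: render in rgb_array mode so
# that env.render() returns image frames (e.g. for recording replay.mp4).
env_kwargs = {"render_mode": "rgb_array"}
env = gym.make("seals/Ant-v1", **env_kwargs)

obs, info = env.reset()
frame = env.render()  # an (H, W, 3) uint8 array in rgb_array mode
print(frame.shape)
```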
env_kwargs.yml CHANGED
@@ -1 +1 @@
- {}
+ render_mode: rgb_array
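`env_kwargs.yml` moves from an empty mapping to `render_mode: rgb_array`, mirroring the README section above. A small sketch of how such a file is typically consumed, assuming PyYAML is available (the loading code is illustrative, not part of this repo):

```python
import gymnasium as gym
import yaml

# Read the env kwargs stored alongside the model and forward them to gym.make.
with open("env_kwargs.yml") as f:
    env_kwargs = yaml.safe_load(f) or {}  # the old file held {}, the new one a mapping

env = gym.make("seals/Ant-v1", **env_kwargs)  # env ID assumed from the repo name
```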
replay.mp4 ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:f4ed1b56b5eda9be49af251cc784ac6ecde4042ab6e958fcd18172c91baaefe3
+ size 1213986
results.json CHANGED
@@ -1 +1 @@
- {"mean_reward": 1004.1500469999999, "std_reward": 26.598463652396816, "is_deterministic": true, "n_eval_episodes": 10, "eval_datetime": "2023-09-15T13:54:20.973826"}
+ {"mean_reward": 1004.1500469999999, "std_reward": 26.598463652396816, "is_deterministic": true, "n_eval_episodes": 10, "eval_datetime": "2023-09-18T09:54:06.014141"}
sac-seals-Ant-v1.zip CHANGED
@@ -1,3 +1,3 @@
  version https://git-lfs.github.com/spec/v1
- oid sha256:875d090ecd5d25b9bc78d82d72ec3b2ab367bd57776237a14da800934c383d0c
- size 3402061
+ oid sha256:a8a90529d2bdbf00e4bd3f1157597fd90f35c65b76a88c707158c4ec5478c662
+ size 3402065
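The zip is stored as a Git LFS pointer, so only the oid/size lines live in git and the actual checkpoint is fetched on demand. A sketch of pulling and loading it programmatically, assuming the Hub repo ID `ernestum/sac-seals-Ant-v1` (inferred, not stated in the diff):

```python
from huggingface_hub import hf_hub_download
from stable_baselines3 import SAC

# Resolves the LFS pointer and downloads the actual ~3.4 MB checkpoint.
checkpoint = hf_hub_download(
    repo_id="ernestum/sac-seals-Ant-v1",  # assumed repo ID
    filename="sac-seals-Ant-v1.zip",
)
model = SAC.load(checkpoint)
print(model.policy)
```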
sac-seals-Ant-v1/_stable_baselines3_version CHANGED
@@ -1 +1 @@
- 2.1.0
+ 2.2.0a3
sac-seals-Ant-v1/data CHANGED
@@ -5,17 +5,17 @@
  "__module__": "stable_baselines3.sac.policies",
  "__annotations__": "{'actor': <class 'stable_baselines3.sac.policies.Actor'>, 'critic': <class 'stable_baselines3.common.policies.ContinuousCritic'>, 'critic_target': <class 'stable_baselines3.common.policies.ContinuousCritic'>}",
  "__doc__": "\n Policy class (with both actor and critic) for SAC.\n\n :param observation_space: Observation space\n :param action_space: Action space\n :param lr_schedule: Learning rate schedule (could be constant)\n :param net_arch: The specification of the policy and value networks.\n :param activation_fn: Activation function\n :param use_sde: Whether to use State Dependent Exploration or not\n :param log_std_init: Initial value for the log standard deviation\n :param use_expln: Use ``expln()`` function instead of ``exp()`` when using gSDE to ensure\n a positive standard deviation (cf paper). It allows to keep variance\n above zero and prevent it from growing too fast. In practice, ``exp()`` is usually enough.\n :param clip_mean: Clip the mean output when using gSDE to avoid numerical instability.\n :param features_extractor_class: Features extractor to use.\n :param features_extractor_kwargs: Keyword arguments\n to pass to the features extractor.\n :param normalize_images: Whether to normalize images or not,\n dividing by 255.0 (True by default)\n :param optimizer_class: The optimizer to use,\n ``th.optim.Adam`` by default\n :param optimizer_kwargs: Additional keyword arguments,\n excluding the learning rate, to pass to the optimizer\n :param n_critics: Number of critic networks to create.\n :param share_features_extractor: Whether to share or not the features extractor\n between the actor and the critic (this saves computation time)\n ",
- "__init__": "<function SACPolicy.__init__ at 0x7fcd4a7918b0>",
- "_build": "<function SACPolicy._build at 0x7fcd4a791940>",
- "_get_constructor_parameters": "<function SACPolicy._get_constructor_parameters at 0x7fcd4a7919d0>",
- "reset_noise": "<function SACPolicy.reset_noise at 0x7fcd4a791a60>",
- "make_actor": "<function SACPolicy.make_actor at 0x7fcd4a791af0>",
- "make_critic": "<function SACPolicy.make_critic at 0x7fcd4a791b80>",
- "forward": "<function SACPolicy.forward at 0x7fcd4a791c10>",
- "_predict": "<function SACPolicy._predict at 0x7fcd4a791ca0>",
- "set_training_mode": "<function SACPolicy.set_training_mode at 0x7fcd4a791d30>",
+ "__init__": "<function SACPolicy.__init__ at 0x7efd99831700>",
+ "_build": "<function SACPolicy._build at 0x7efd99831790>",
+ "_get_constructor_parameters": "<function SACPolicy._get_constructor_parameters at 0x7efd99831820>",
+ "reset_noise": "<function SACPolicy.reset_noise at 0x7efd998318b0>",
+ "make_actor": "<function SACPolicy.make_actor at 0x7efd99831940>",
+ "make_critic": "<function SACPolicy.make_critic at 0x7efd998319d0>",
+ "forward": "<function SACPolicy.forward at 0x7efd99831a60>",
+ "_predict": "<function SACPolicy._predict at 0x7efd99831af0>",
+ "set_training_mode": "<function SACPolicy.set_training_mode at 0x7efd99831b80>",
  "__abstractmethods__": "frozenset()",
- "_abc_impl": "<_abc_data object at 0x7fcd4a7928d0>"
+ "_abc_impl": "<_abc_data object at 0x7efd99828ab0>"
  },
  "verbose": 1,
  "policy_kwargs": {
@@ -103,13 +103,13 @@
  ":serialized:": "gAWVNQAAAAAAAACMIHN0YWJsZV9iYXNlbGluZXMzLmNvbW1vbi5idWZmZXJzlIwMUmVwbGF5QnVmZmVylJOULg==",
  "__module__": "stable_baselines3.common.buffers",
  "__doc__": "\n Replay buffer used in off-policy algorithms like SAC/TD3.\n\n :param buffer_size: Max number of element in the buffer\n :param observation_space: Observation space\n :param action_space: Action space\n :param device: PyTorch device\n :param n_envs: Number of parallel environments\n :param optimize_memory_usage: Enable a memory efficient variant\n of the replay buffer which reduces by almost a factor two the memory used,\n at a cost of more complexity.\n See https://github.com/DLR-RM/stable-baselines3/issues/37#issuecomment-637501195\n and https://github.com/DLR-RM/stable-baselines3/pull/28#issuecomment-637559274\n Cannot be used in combination with handle_timeout_termination.\n :param handle_timeout_termination: Handle timeout termination (due to timelimit)\n separately and treat the task as infinite horizon task.\n https://github.com/DLR-RM/stable-baselines3/issues/284\n ",
- "__init__": "<function ReplayBuffer.__init__ at 0x7fcd4a75b820>",
- "add": "<function ReplayBuffer.add at 0x7fcd4a75b8b0>",
- "sample": "<function ReplayBuffer.sample at 0x7fcd4a75b940>",
- "_get_samples": "<function ReplayBuffer._get_samples at 0x7fcd4a75b9d0>",
- "_maybe_cast_dtype": "<staticmethod object at 0x7fcd4a7615e0>",
+ "__init__": "<function ReplayBuffer.__init__ at 0x7efd9987e700>",
+ "add": "<function ReplayBuffer.add at 0x7efd9987e790>",
+ "sample": "<function ReplayBuffer.sample at 0x7efd9987e820>",
+ "_get_samples": "<function ReplayBuffer._get_samples at 0x7efd9987e8b0>",
+ "_maybe_cast_dtype": "<staticmethod object at 0x7efd998777c0>",
  "__abstractmethods__": "frozenset()",
- "_abc_impl": "<_abc_data object at 0x7fcd4a761600>"
+ "_abc_impl": "<_abc_data object at 0x7efd998777e0>"
  },
  "replay_buffer_kwargs": {},
  "train_freq": {
sac-seals-Ant-v1/system_info.txt CHANGED
@@ -1,6 +1,6 @@
  - OS: Linux-5.4.0-156-generic-x86_64-with-glibc2.29 # 173-Ubuntu SMP Tue Jul 11 07:25:22 UTC 2023
  - Python: 3.8.10
- - Stable-Baselines3: 2.1.0
+ - Stable-Baselines3: 2.2.0a3
  - PyTorch: 2.0.1+cu117
  - GPU Enabled: False
  - Numpy: 1.24.4
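`system_info.txt` pins the training environment; the only change is the Stable-Baselines3 version bump to 2.2.0a3 seen elsewhere in this commit. When loading the checkpoint, SB3 can print the saved system info alongside the current one, which helps spot such version mismatches (a short sketch):

```python
from stable_baselines3 import SAC

# print_system_info=True prints both the system info stored in the zip
# (the contents of system_info.txt) and the info of the current machine.
model = SAC.load("sac-seals-Ant-v1.zip", print_system_info=True)
```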
train_eval_metrics.zip CHANGED
@@ -1,3 +1,3 @@
  version https://git-lfs.github.com/spec/v1
- oid sha256:a0e43661297925c75d1f133dc5aaeaf10c89148d5aad4614db0df232fbddbdee
+ oid sha256:d56124deccc53907b7f489dc0d43878a1d2b1a91f0b09055d79d130e20ea157f
  size 29198