jren123 committed on
Commit
bfe7399
1 Parent(s): 8588fd3

Initial commit

.gitattributes CHANGED
@@ -33,3 +33,4 @@ saved_model/**/* filter=lfs diff=lfs merge=lfs -text
  *.zip filter=lfs diff=lfs merge=lfs -text
  *.zst filter=lfs diff=lfs merge=lfs -text
  *tfevents* filter=lfs diff=lfs merge=lfs -text
+ replay.mp4 filter=lfs diff=lfs merge=lfs -text
README.md ADDED
@@ -0,0 +1,37 @@
+ ---
+ library_name: stable-baselines3
+ tags:
+ - Walker2d-v4
+ - deep-reinforcement-learning
+ - reinforcement-learning
+ - stable-baselines3
+ model-index:
+ - name: SAC
+   results:
+   - task:
+       type: reinforcement-learning
+       name: reinforcement-learning
+     dataset:
+       name: Walker2d-v4
+       type: Walker2d-v4
+     metrics:
+     - type: mean_reward
+       value: 4201.90 +/- 62.23
+       name: mean_reward
+       verified: false
+ ---
+
+ # **SAC** Agent playing **Walker2d-v4**
+ This is a trained model of a **SAC** agent playing **Walker2d-v4**
+ using the [stable-baselines3 library](https://github.com/DLR-RM/stable-baselines3).
+
+ ## Usage (with Stable-baselines3)
+ A minimal loading sketch (the repo id below is a placeholder; point it at this repository on the Hub):
+
+ ```python
+ from stable_baselines3 import SAC
+ from huggingface_sb3 import load_from_hub
+
+ # Download the saved agent from the Hub and load it (repo_id is a placeholder)
+ checkpoint = load_from_hub(repo_id="<user>/SAC-Walker2d-v4", filename="SAC-Walker2d-v4.zip")
+ model = SAC.load(checkpoint)
+ ```
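+
+ Continuing from the snippet above, a hedged evaluation sketch (it assumes `gymnasium` with the MuJoCo extra is installed so `Walker2d-v4` can be built; 10 deterministic episodes mirrors `results.json`):
+
+ ```python
+ import gymnasium as gym
+ from stable_baselines3.common.evaluation import evaluate_policy
+
+ # Score the loaded model over 10 deterministic episodes, as reported in results.json
+ env = gym.make("Walker2d-v4")
+ mean_reward, std_reward = evaluate_policy(model, env, n_eval_episodes=10, deterministic=True)
+ print(f"mean_reward={mean_reward:.2f} +/- {std_reward:.2f}")
+ ```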
SAC-Walker2d-v4.zip ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:3d3bae5c18227748ae806814508b2a4cb4b80638507b819208d7aa7f458aa494
+ size 3239752
SAC-Walker2d-v4/_stable_baselines3_version ADDED
@@ -0,0 +1 @@
+ 2.3.2
SAC-Walker2d-v4/actor.optimizer.pth ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:01fcb5266c2c364ac6d42dda92611e70c712463f84ac81813b7022d750aa682e
+ size 594510
SAC-Walker2d-v4/critic.optimizer.pth ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:d8e7d6c658a6012e912766e722f0c3e2eaf7ae42c449ebdaed930297e3e964b7
+ size 1164714
SAC-Walker2d-v4/data ADDED
@@ -0,0 +1,125 @@
1
+ {
2
+ "policy_class": {
3
+ ":type:": "<class 'abc.ABCMeta'>",
4
+ ":serialized:": "gAWVMAAAAAAAAACMHnN0YWJsZV9iYXNlbGluZXMzLnNhYy5wb2xpY2llc5SMCVNBQ1BvbGljeZSTlC4=",
5
+ "__module__": "stable_baselines3.sac.policies",
6
+ "__annotations__": "{'actor': <class 'stable_baselines3.sac.policies.Actor'>, 'critic': <class 'stable_baselines3.common.policies.ContinuousCritic'>, 'critic_target': <class 'stable_baselines3.common.policies.ContinuousCritic'>}",
7
+ "__doc__": "\n Policy class (with both actor and critic) for SAC.\n\n :param observation_space: Observation space\n :param action_space: Action space\n :param lr_schedule: Learning rate schedule (could be constant)\n :param net_arch: The specification of the policy and value networks.\n :param activation_fn: Activation function\n :param use_sde: Whether to use State Dependent Exploration or not\n :param log_std_init: Initial value for the log standard deviation\n :param use_expln: Use ``expln()`` function instead of ``exp()`` when using gSDE to ensure\n a positive standard deviation (cf paper). It allows to keep variance\n above zero and prevent it from growing too fast. In practice, ``exp()`` is usually enough.\n :param clip_mean: Clip the mean output when using gSDE to avoid numerical instability.\n :param features_extractor_class: Features extractor to use.\n :param features_extractor_kwargs: Keyword arguments\n to pass to the features extractor.\n :param normalize_images: Whether to normalize images or not,\n dividing by 255.0 (True by default)\n :param optimizer_class: The optimizer to use,\n ``th.optim.Adam`` by default\n :param optimizer_kwargs: Additional keyword arguments,\n excluding the learning rate, to pass to the optimizer\n :param n_critics: Number of critic networks to create.\n :param share_features_extractor: Whether to share or not the features extractor\n between the actor and the critic (this saves computation time)\n ",
8
+ "__init__": "<function SACPolicy.__init__ at 0x11d347240>",
9
+ "_build": "<function SACPolicy._build at 0x11d347880>",
10
+ "_get_constructor_parameters": "<function SACPolicy._get_constructor_parameters at 0x11d347920>",
11
+ "reset_noise": "<function SACPolicy.reset_noise at 0x11d3479c0>",
12
+ "make_actor": "<function SACPolicy.make_actor at 0x11d347a60>",
13
+ "make_critic": "<function SACPolicy.make_critic at 0x11d347b00>",
14
+ "forward": "<function SACPolicy.forward at 0x11d347ba0>",
15
+ "_predict": "<function SACPolicy._predict at 0x11d347c40>",
16
+ "set_training_mode": "<function SACPolicy.set_training_mode at 0x11d347ce0>",
17
+ "__abstractmethods__": "frozenset()",
18
+ "_abc_impl": "<_abc._abc_data object at 0x11d360340>"
19
+ },
20
+ "verbose": 0,
21
+ "policy_kwargs": {
22
+ "use_sde": false
23
+ },
24
+ "num_timesteps": 890000,
25
+ "_total_timesteps": 1000000,
26
+ "_num_timesteps_at_start": 0,
27
+ "seed": null,
28
+ "action_noise": null,
29
+ "start_time": 1718033144906360391,
30
+ "learning_rate": 0.0003,
31
+ "tensorboard_log": null,
32
+ "_last_obs": {
33
+ ":type:": "<class 'numpy.ndarray'>",
34
+ ":serialized:": "gAWV/QAAAAAAAACMEm51bXB5LmNvcmUubnVtZXJpY5SMC19mcm9tYnVmZmVylJOUKJaIAAAAAAAAAHMeCHpB3PI/DFpikkrE4j8qMOIo+F7Fv8aKck+w3IY/rp5m/LHE6j9Htdk8ZtmWP61/3IvcCJc/m0EKYuA53b9yAYGcVPkIQLgfQEmWTNk/xMUBkpXG4j+LuZAKQZ0XQG/Dq8BeecI/cqu4R66357+Z1/0YARr0v0GQbNN/7NW/vrwsvangEsCUjAVudW1weZSMBWR0eXBllJOUjAJmOJSJiIeUUpQoSwOMATyUTk5OSv////9K/////0sAdJRiSwFLEYaUjAFDlHSUUpQu"
35
+ },
36
+ "_last_episode_starts": {
37
+ ":type:": "<class 'numpy.ndarray'>",
38
+ ":serialized:": "gAWVdAAAAAAAAACMEm51bXB5LmNvcmUubnVtZXJpY5SMC19mcm9tYnVmZmVylJOUKJYBAAAAAAAAAAGUjAVudW1weZSMBWR0eXBllJOUjAJiMZSJiIeUUpQoSwOMAXyUTk5OSv////9K/////0sAdJRiSwGFlIwBQ5R0lFKULg=="
39
+ },
40
+ "_last_original_obs": {
41
+ ":type:": "<class 'numpy.ndarray'>",
42
+ ":serialized:": "gAWV/QAAAAAAAACMEm51bXB5LmNvcmUubnVtZXJpY5SMC19mcm9tYnVmZmVylJOUKJaIAAAAAAAAALb6nhNC0fI/s5BKsgx84j+is/BK+/jLv9Gbb7kjDYU/qWMcs9L86j8rHsvCXQGeP/2GmAmYAJo/BRntIk4r2789d/dAYS4KQK9pFCZOr9E/ezdTFZfH+T+t7jVzXfAbQCPSSUy6RbA/aLNsJ+Vj77+ZJ02Q5gvgv/wzhrykGNi/UHY3zgdyCsCUjAVudW1weZSMBWR0eXBllJOUjAJmOJSJiIeUUpQoSwOMATyUTk5OSv////9K/////0sAdJRiSwFLEYaUjAFDlHSUUpQu"
43
+ },
44
+ "_episode_num": 1910,
45
+ "use_sde": false,
46
+ "sde_sample_freq": -1,
47
+ "_current_progress_remaining": 0.11000100000000002,
48
+ "_stats_window_size": 100,
49
+ "ep_info_buffer": {
50
+ ":type:": "<class 'collections.deque'>",
51
+ ":serialized:": "gAWVOwwAAAAAAACMC2NvbGxlY3Rpb25zlIwFZGVxdWWUk5QpS2SGlFKUKH2UKIwBcpRHQK/hf52Qnx+MAWyUTegDjAF0lEdAt43z+irT6XV9lChoBkdArypDOX3QD2gHTegDaAhHQLeVGLNfPX11fZQoaAZHQK9Akk/KQq9oB03oA2gIR0C3nDj3mFJydX2UKGgGR0CvQWnqu8sdaAdN6ANoCEdAt6Ndz5oGp3V9lChoBkdAryHms5n14GgHTegDaAhHQLeqfALiMpB1fZQoaAZHQLAZD6RQrMFoB03oA2gIR0C3tOB4yGi6dX2UKGgGR0CvH9oO6NEPaAdN6ANoCEdAt7wEsiB5HHV9lChoBkdAr+VsVvddmmgHTegDaAhHQLfDIj+aScN1fZQoaAZHQK9kvat9x6xoB03oA2gIR0C3yjz544ZNdX2UKGgGR0Cv0Grb5/LDaAdN6ANoCEdAt9FarzXjEXV9lChoBkdArxFVHavicWgHTegDaAhHQLfb5FDOTq11fZQoaAZHQK9w4jOcDr9oB03oA2gIR0C34wa11GLDdX2UKGgGR0Cum8RQaaTfaAdN6ANoCEdAt+op2FFlTXV9lChoBkdAr2v+qYJE6WgHTegDaAhHQLfxSHaN+9d1fZQoaAZHQK/0raakRBhoB03oA2gIR0C3+G5Grjo7dX2UKGgGR0CKp/qEeyRkaAdNBQFoCEdAt/pH3L3bmHV9lChoBkdArweUkY4yXWgHTegDaAhHQLgEqYao/A11fZQoaAZHQK/gplMh5gRoB03oA2gIR0C4C8PfTCtSdX2UKGgGR0CvfFKrBCUpaAdN6ANoCEdAuBLchA4XGnV9lChoBkdAr+BTY9Pk72gHTegDaAhHQLgaAdadMCd1fZQoaAZHQK3Jm8kleGBoB03oA2gIR0C4IRs5Ke05dX2UKGgGR0CvLJEOZssQaAdN6ANoCEdAuCuQdGRV63V9lChoBkdAriL/MyJsPGgHTegDaAhHQLgytr8iwB51fZQoaAZHQK7MQ2y9mHxoB03oA2gIR0C4OeNxp+MIdX2UKGgGR0Cu7j9HlOoHaAdN6ANoCEdAuEED6guh9XV9lChoBkdArxA3N3W4E2gHTegDaAhHQLhIH1K5Cnh1fZQoaAZHQK8PM2hqTKVoB03oA2gIR0C4UowzYVZcdX2UKGgGR0CwCiIOpbUxaAdN6ANoCEdAuFmmrT6SDHV9lChoBkdArzIhyQxN7GgHTegDaAhHQLhgw+kP+XJ1fZQoaAZHQK4kzhQWN3poB03oA2gIR0C4Z+BtYSxrdX2UKGgGR0Ct90hCMPz4aAdN6ANoCEdAuG76mvW6LHV9lChoBkdAr4EUgdOqN2gHTegDaAhHQLh5f5Jbt7d1fZQoaAZHQK9jH+FUQ05oB03oA2gIR0C4gKtXPqs2dX2UKGgGR0CwBun/giu/aAdN6ANoCEdAuIfRinYQKHV9lChoBkdAr27Q+OfdymgHTegDaAhHQLiPBXXRPXV1fZQoaAZHQK+MoAAhje9oB03oA2gIR0C4liAbp/wzdX2UKGgGR0Cvl48CgbqAaAdN6ANoCEdAuKCBbGFSKnV9lChoBkdAr/EsBIWgvmgHTegDaAhHQLinn96kZaV1fZQoaAZHQK/VOE/SpitoB03oA2gIR0C4rsUs4DLbdX2UKGgGR0CwOCXbdrO8aAdN6ANoCEdAuLXh0aIeo3V9lChoBkdAsCt/p3X7L2gHTegDaAhHQLi9AmHP/rB1fZQoaAZHQK8xE1n/T9doB03oA2gIR0C4x2/n8sMBdX2UKGgGR0CwQVEU9IPLaAdN6ANoCEdAuM6o9/z8QHV9lChoBkdArsO5YDDCQGgHTegDaAhHQLjVyU34sVd1fZQoaAZHQK8oVHG0eEJoB03oA2gIR0C43O26bvw3dX2UKGgGR0Cv7dLi++M7aAdN6ANoCEdAuOQKjBVMmHV9lChoBkdAg4KoUahpQGgHTQMBaAhHQLjpNB1LamJ1fZQoaAZHQK/LbUn5SFZoB03oA2gIR0C48FLidat+dX2UKGgGR0CvpiTk6tDEaAdN6ANoCEdAuPdxnrY5DXV9lChoBkdAsBID7di2D2gHTegDaAhHQLj+jRDCxeN1fZQoaAZHQK9pJBbfP5ZoB03oA2gIR0C5Baqvq1PWdX2UKGgGR0CwEsV4Pf8/aAdN6ANoCEdAuRAguOCGvnV9lChoBkdAr42e/JvHcWgHTegDaAhHQLkXSinYQJ51fZQoaAZHQK8vH7gsK9hoB03oA2gIR0C5HnLtmcvvdX2UKGgGR0CvQ76e5Fw2aAdN6ANoCEdAuSWhvMr3CnV9lChoBkdArzq4evIOpmgHTegDaAhHQLksv0163RZ1fZQoaAZHQK+PsFRpDeFoB03oA2gIR0C5NoPykKu0dX2UKGgGR0CwFWozi0fHaAdN6ANoCEdAuT2gu7HyVnV9lChoBkdAYJfD1GsmwGgHS01oCEdAuT4siml67nV9lChoBkdAYtrjCpFTemgHS1poCEdAuT7PspoboHV9lChoBkdAdxL7YTTOPmgHS4hoCEdAuT/HLgXMyXV9lChoBkdAsAaVXFLnLmgHTegDaAhHQLlG7RzRx951fZQoaAZHQK/mTW8yvcJoB03oA2gIR0C5TgVea8YidX2UKGgGR0CvTykE1VHXaAdN6ANoCEdAuVUgJTl1bXV9lChoBkdArrkDvuw5emgHTegDaAhHQLle+3vhIe51fZQoaAZHQK/WG6TW5H5oB03oA2gIR0C5ZjFbzK9xdX2UKGgGR0BnlZmZmZmaaAdLVmgIR0C5Zs6QFLWadX2UKGgGR0CwN4hKxs2vaAdN6ANoCEdAuW3vEn9ehXV9lChoBkdAsBUlLBbfQGgHTegDaAhHQLl1D5hBqsV1fZQoaAZHQK+N2P7vXshoB03oA2gIR0C5fCsuzyBkdX2UKGgGR0Cu1Zdl2/zraAdN6ANoCEdAuYRcoqkM1HV9lChoBkdAr+QdRpDeCWgHTegDaAhHQLmLd0XgtOF1fZQoaAZHQK8YMp0fYBhoB03oA2gIR0C5kpG/8EV4dX2UKGgGR0Cvtz7aAWi2aAdN6ANoCEdAuZmsqe9SM3V9lChoBkdAr27atV7x/mgHTegDaAhHQLmg6atcOb11fZQoaAZHQLAXkdkrf+FoB03oA2gIR0C5q1kmtyPudX2UKGgGR0CwN57iADq4aAdN6ANoCEdAubKATHsC1nV9lChoBkdAr2r16HCXQmgHTegDaAhHQLm5prhisn11fZQoaAZHQK83tpfx+a1oB03oA2gIR0C5wNKgh8pkdX2UKGgGR0CvZKhLXcxkaAdN6ANoCEdAucfz961LJ3V9lChoBkdAsDM+9Jz1b2gHTegDaAhHQLnSVpc5bQl1fZQoaAZHQK/BjPPcBU9oB03oA2gIR0C52XKtga3rdX2UKGgGR0CwK8dWluWKaAdN6ANoCEdAueCVbW3BpHV9lChoBkdAsAY6KEWZZ2gHTegDaAhHQLnnr47zTWp1fZQo
aAZHQGTqrh73PAxoB0tUaAhHQLnoSAh0Qsh1fZQoaAZHQFYEpSrHU+doB0tAaAhHQLnovKaG5+Z1fZQoaAZHQK/ytcBU70ZoB03oA2gIR0C579pGjKxLdX2UKGgGR0Cv6NNO/L1VaAdN6ANoCEdAufpXPfKp1nV9lChoBkdAVKM68xsVL2gHSz9oCEdAufrJ0uDjBHV9lChoBkdAsFGwEC/47GgHTegDaAhHQLoB8ix3V091fZQoaAZHQKD+lErGza9oB00yAmgIR0C6BfeZ5Rj0dX2UKGgGR0Cv2D2TxG2DaAdN6ANoCEdAug0ZHd43WHV9lChoBkdAsD3WT3Zf2WgHTegDaAhHQLoUPPqLS/l1fZQoaAZHQLBvfVy3kPtoB03oA2gIR0C6HkkWIoE0dX2UKGgGR0Cv32vfKp1iaAdN6ANoCEdAuiVmp5u63HV9lChoBkdAdGo5+H8CP2gHS35oCEdAuiZMFGG21HV9lChoBkdAdBKUKiO/+WgHS35oCEdAuicxdjXnQ3V9lChoBkdAsDHm4ZuQ62gHTegDaAhHQLouTO+qR2d1fZQoaAZHQK/2tg75mAdoB03oA2gIR0C6NXMFINExdX2UKGgGR0CwUWMgMc6vaAdN6ANoCEdAujyOkzoECHVlLg=="
52
+ },
53
+ "ep_success_buffer": {
54
+ ":type:": "<class 'collections.deque'>",
55
+ ":serialized:": "gAWVIAAAAAAAAACMC2NvbGxlY3Rpb25zlIwFZGVxdWWUk5QpS2SGlFKULg=="
56
+ },
57
+ "_n_updates": 879999,
58
+ "buffer_size": 1000000,
59
+ "batch_size": 256,
60
+ "learning_starts": 10000,
61
+ "tau": 0.005,
62
+ "gamma": 0.99,
63
+ "gradient_steps": 1,
64
+ "optimize_memory_usage": false,
65
+ "replay_buffer_class": {
66
+ ":type:": "<class 'abc.ABCMeta'>",
67
+ ":serialized:": "gAWVNQAAAAAAAACMIHN0YWJsZV9iYXNlbGluZXMzLmNvbW1vbi5idWZmZXJzlIwMUmVwbGF5QnVmZmVylJOULg==",
68
+ "__module__": "stable_baselines3.common.buffers",
69
+ "__annotations__": "{'observations': <class 'numpy.ndarray'>, 'next_observations': <class 'numpy.ndarray'>, 'actions': <class 'numpy.ndarray'>, 'rewards': <class 'numpy.ndarray'>, 'dones': <class 'numpy.ndarray'>, 'timeouts': <class 'numpy.ndarray'>}",
70
+ "__doc__": "\n Replay buffer used in off-policy algorithms like SAC/TD3.\n\n :param buffer_size: Max number of element in the buffer\n :param observation_space: Observation space\n :param action_space: Action space\n :param device: PyTorch device\n :param n_envs: Number of parallel environments\n :param optimize_memory_usage: Enable a memory efficient variant\n of the replay buffer which reduces by almost a factor two the memory used,\n at a cost of more complexity.\n See https://github.com/DLR-RM/stable-baselines3/issues/37#issuecomment-637501195\n and https://github.com/DLR-RM/stable-baselines3/pull/28#issuecomment-637559274\n Cannot be used in combination with handle_timeout_termination.\n :param handle_timeout_termination: Handle timeout termination (due to timelimit)\n separately and treat the task as infinite horizon task.\n https://github.com/DLR-RM/stable-baselines3/issues/284\n ",
71
+ "__init__": "<function ReplayBuffer.__init__ at 0x11d2c00e0>",
72
+ "add": "<function ReplayBuffer.add at 0x11d2c0220>",
73
+ "sample": "<function ReplayBuffer.sample at 0x11d2c02c0>",
74
+ "_get_samples": "<function ReplayBuffer._get_samples at 0x11d2c0360>",
75
+ "_maybe_cast_dtype": "<staticmethod(<function ReplayBuffer._maybe_cast_dtype at 0x11d2c0400>)>",
76
+ "__abstractmethods__": "frozenset()",
77
+ "_abc_impl": "<_abc._abc_data object at 0x11d2b8bc0>"
78
+ },
79
+ "replay_buffer_kwargs": {},
80
+ "train_freq": {
81
+ ":type:": "<class 'stable_baselines3.common.type_aliases.TrainFreq'>",
82
+ ":serialized:": "gAWVYQAAAAAAAACMJXN0YWJsZV9iYXNlbGluZXMzLmNvbW1vbi50eXBlX2FsaWFzZXOUjAlUcmFpbkZyZXGUk5RLAWgAjBJUcmFpbkZyZXF1ZW5jeVVuaXSUk5SMBHN0ZXCUhZRSlIaUgZQu"
83
+ },
84
+ "use_sde_at_warmup": false,
85
+ "target_entropy": -6.0,
86
+ "ent_coef": "auto",
87
+ "target_update_interval": 1,
88
+ "observation_space": {
89
+ ":type:": "<class 'gymnasium.spaces.box.Box'>",
90
+ ":serialized:": "gAWVsQIAAAAAAACMFGd5bW5hc2l1bS5zcGFjZXMuYm94lIwDQm94lJOUKYGUfZQojAVkdHlwZZSMBW51bXB5lIwFZHR5cGWUk5SMAmY4lImIh5RSlChLA4wBPJROTk5K/////0r/////SwB0lGKMDWJvdW5kZWRfYmVsb3eUjBJudW1weS5jb3JlLm51bWVyaWOUjAtfZnJvbWJ1ZmZlcpSTlCiWEQAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAJRoCIwCYjGUiYiHlFKUKEsDjAF8lE5OTkr/////Sv////9LAHSUYksRhZSMAUOUdJRSlIwNYm91bmRlZF9hYm92ZZRoESiWEQAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAJRoFUsRhZRoGXSUUpSMBl9zaGFwZZRLEYWUjANsb3eUaBEologAAAAAAAAAAAAAAAAA8P8AAAAAAADw/wAAAAAAAPD/AAAAAAAA8P8AAAAAAADw/wAAAAAAAPD/AAAAAAAA8P8AAAAAAADw/wAAAAAAAPD/AAAAAAAA8P8AAAAAAADw/wAAAAAAAPD/AAAAAAAA8P8AAAAAAADw/wAAAAAAAPD/AAAAAAAA8P8AAAAAAADw/5RoC0sRhZRoGXSUUpSMBGhpZ2iUaBEologAAAAAAAAAAAAAAAAA8H8AAAAAAADwfwAAAAAAAPB/AAAAAAAA8H8AAAAAAADwfwAAAAAAAPB/AAAAAAAA8H8AAAAAAADwfwAAAAAAAPB/AAAAAAAA8H8AAAAAAADwfwAAAAAAAPB/AAAAAAAA8H8AAAAAAADwfwAAAAAAAPB/AAAAAAAA8H8AAAAAAADwf5RoC0sRhZRoGXSUUpSMCGxvd19yZXBylIwELWluZpSMCWhpZ2hfcmVwcpSMA2luZpSMCl9ucF9yYW5kb22UTnViLg==",
91
+ "dtype": "float64",
92
+ "bounded_below": "[False False False False False False False False False False False False\n False False False False False]",
93
+ "bounded_above": "[False False False False False False False False False False False False\n False False False False False]",
94
+ "_shape": [
95
+ 17
96
+ ],
97
+ "low": "[-inf -inf -inf -inf -inf -inf -inf -inf -inf -inf -inf -inf -inf -inf\n -inf -inf -inf]",
98
+ "high": "[inf inf inf inf inf inf inf inf inf inf inf inf inf inf inf inf inf]",
99
+ "low_repr": "-inf",
100
+ "high_repr": "inf",
101
+ "_np_random": null
102
+ },
103
+ "action_space": {
104
+ ":type:": "<class 'gymnasium.spaces.box.Box'>",
105
+ ":serialized:": "gAWVgAIAAAAAAACMFGd5bW5hc2l1bS5zcGFjZXMuYm94lIwDQm94lJOUKYGUfZQojAVkdHlwZZSMBW51bXB5lIwFZHR5cGWUk5SMAmY0lImIh5RSlChLA4wBPJROTk5K/////0r/////SwB0lGKMDWJvdW5kZWRfYmVsb3eUjBJudW1weS5jb3JlLm51bWVyaWOUjAtfZnJvbWJ1ZmZlcpSTlCiWBgAAAAAAAAABAQEBAQGUaAiMAmIxlImIh5RSlChLA4wBfJROTk5K/////0r/////SwB0lGJLBoWUjAFDlHSUUpSMDWJvdW5kZWRfYWJvdmWUaBEolgYAAAAAAAAAAQEBAQEBlGgVSwaFlGgZdJRSlIwGX3NoYXBllEsGhZSMA2xvd5RoESiWGAAAAAAAAAAAAIC/AACAvwAAgL8AAIC/AACAvwAAgL+UaAtLBoWUaBl0lFKUjARoaWdolGgRKJYYAAAAAAAAAAAAgD8AAIA/AACAPwAAgD8AAIA/AACAP5RoC0sGhZRoGXSUUpSMCGxvd19yZXBylIwELTEuMJSMCWhpZ2hfcmVwcpSMAzEuMJSMCl9ucF9yYW5kb22UjBRudW1weS5yYW5kb20uX3BpY2tsZZSMEF9fZ2VuZXJhdG9yX2N0b3KUk5SMBVBDRzY0lGgyjBRfX2JpdF9nZW5lcmF0b3JfY3RvcpSTlIaUUpR9lCiMDWJpdF9nZW5lcmF0b3KUjAVQQ0c2NJSMBXN0YXRllH2UKGg9ihGgqc+RHhM5RJ1rPpIAh6CHAIwDaW5jlIoRXUu8TbDMELdjJ9O9GwRqgwB1jApoYXNfdWludDMylEsAjAh1aW50ZWdlcpRLAHVidWIu",
106
+ "dtype": "float32",
107
+ "bounded_below": "[ True True True True True True]",
108
+ "bounded_above": "[ True True True True True True]",
109
+ "_shape": [
110
+ 6
111
+ ],
112
+ "low": "[-1. -1. -1. -1. -1. -1.]",
113
+ "high": "[1. 1. 1. 1. 1. 1.]",
114
+ "low_repr": "-1.0",
115
+ "high_repr": "1.0",
116
+ "_np_random": "Generator(PCG64)"
117
+ },
118
+ "n_envs": 1,
119
+ "lr_schedule": {
120
+ ":type:": "<class 'function'>",
121
+ ":serialized:": "gAWV6gMAAAAAAACMF2Nsb3VkcGlja2xlLmNsb3VkcGlja2xllIwOX21ha2VfZnVuY3Rpb26Uk5QoaACMDV9idWlsdGluX3R5cGWUk5SMCENvZGVUeXBllIWUUpQoSwFLAEsASwFLBUsTQyaVAZcAdAEAAAAAAAAAAAIAiQF8AKsBAAAAAAAAqwEAAAAAAABTAJROhZSMBWZsb2F0lIWUjBJwcm9ncmVzc19yZW1haW5pbmeUhZSMXi9Vc2Vycy9qcmVuL2FuYWNvbmRhMy9lbnZzLzQ3NTYvbGliL3B5dGhvbjMuMTIvc2l0ZS1wYWNrYWdlcy9zdGFibGVfYmFzZWxpbmVzMy9jb21tb24vdXRpbHMucHmUjAg8bGFtYmRhPpSMIWdldF9zY2hlZHVsZV9mbi48bG9jYWxzPi48bGFtYmRhPpRLYUMS+IAApGWpTtA7TdMsTtMmT4AAlEMAlIwOdmFsdWVfc2NoZWR1bGWUhZQpdJRSlH2UKIwLX19wYWNrYWdlX1+UjBhzdGFibGVfYmFzZWxpbmVzMy5jb21tb26UjAhfX25hbWVfX5SMHnN0YWJsZV9iYXNlbGluZXMzLmNvbW1vbi51dGlsc5SMCF9fZmlsZV9flIxeL1VzZXJzL2pyZW4vYW5hY29uZGEzL2VudnMvNDc1Ni9saWIvcHl0aG9uMy4xMi9zaXRlLXBhY2thZ2VzL3N0YWJsZV9iYXNlbGluZXMzL2NvbW1vbi91dGlscy5weZR1Tk5oAIwQX21ha2VfZW1wdHlfY2VsbJSTlClSlIWUdJRSlGgAjBJfZnVuY3Rpb25fc2V0c3RhdGWUk5RoI32UfZQoaBpoD4wMX19xdWFsbmFtZV9flGgQjA9fX2Fubm90YXRpb25zX1+UfZSMDl9fa3dkZWZhdWx0c19flE6MDF9fZGVmYXVsdHNfX5ROjApfX21vZHVsZV9flGgbjAdfX2RvY19flE6MC19fY2xvc3VyZV9flGgAjApfbWFrZV9jZWxslJOUaAIoaAcoSwFLAEsASwFLAUsTQwiVAZcAiQFTAJRoCSmMAV+UhZRoDowEZnVuY5SMGWNvbnN0YW50X2ZuLjxsb2NhbHM+LmZ1bmOUS4VDCPiAANgPEogKlGgSjAN2YWyUhZQpdJRSlGgXTk5oHylSlIWUdJRSlGglaD99lH2UKGgaaDVoKGg2aCl9lGgrTmgsTmgtaBtoLk5oL2gxRz8zqSowVTJhhZRSlIWUjBdfY2xvdWRwaWNrbGVfc3VibW9kdWxlc5RdlIwLX19nbG9iYWxzX1+UfZR1hpSGUjCFlFKUhZRoRl2UaEh9lHWGlIZSMC4="
122
+ },
123
+ "batch_norm_stats": [],
124
+ "batch_norm_stats_target": []
125
+ }
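The `data` file above stores the run's hyperparameters (learning_rate 0.0003, buffer_size 1000000, batch_size 256, learning_starts 10000, tau 0.005, gamma 0.99, one gradient step per environment step, ent_coef "auto", use_sde false, 1e6 total timesteps; the checkpoint itself was written at 890,000 of those steps). Below is a minimal sketch of a training run that mirrors those stored values, reconstructed from this file rather than taken from the author's original script:

```python
import gymnasium as gym
from stable_baselines3 import SAC

# Rebuild a SAC configuration from the hyperparameters stored in SAC-Walker2d-v4/data.
# "MlpPolicy" is the alias for the SACPolicy class recorded in the file.
env = gym.make("Walker2d-v4")
model = SAC(
    "MlpPolicy",
    env,
    learning_rate=3e-4,
    buffer_size=1_000_000,
    learning_starts=10_000,
    batch_size=256,
    tau=0.005,
    gamma=0.99,
    train_freq=1,
    gradient_steps=1,
    ent_coef="auto",
    target_update_interval=1,
    use_sde=False,
    verbose=0,
)
model.learn(total_timesteps=1_000_000)
model.save("SAC-Walker2d-v4")
```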
SAC-Walker2d-v4/ent_coef_optimizer.pth ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:34b870694f4e03e51c25d262896e470a33b6e6d87176e5eee1d0b9fb5651b42d
+ size 1940
SAC-Walker2d-v4/policy.pth ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:c8aea976d87b4906180a6bde9b2205acde4c9adbeca3eb1e16216f83fc0913c6
+ size 1459958
SAC-Walker2d-v4/pytorch_variables.pth ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:e7873096a966928e45675a7265358ad6ff818a77718d034d9b8cd83586814a46
+ size 1180
SAC-Walker2d-v4/system_info.txt ADDED
@@ -0,0 +1,8 @@
+ - OS: macOS-14.4.1-arm64-arm-64bit Darwin Kernel Version 23.4.0: Fri Mar 15 00:12:41 PDT 2024; root:xnu-10063.101.17~1/RELEASE_ARM64_T8103
+ - Python: 3.12.3
+ - Stable-Baselines3: 2.3.2
+ - PyTorch: 2.3.1
+ - GPU Enabled: False
+ - Numpy: 1.26.4
+ - Cloudpickle: 3.0.0
+ - Gymnasium: 0.29.1
config.json ADDED
@@ -0,0 +1 @@
+ {"policy_class": {":type:": "<class 'abc.ABCMeta'>", ":serialized:": "gAWVMAAAAAAAAACMHnN0YWJsZV9iYXNlbGluZXMzLnNhYy5wb2xpY2llc5SMCVNBQ1BvbGljeZSTlC4=", "__module__": "stable_baselines3.sac.policies", "__annotations__": "{'actor': <class 'stable_baselines3.sac.policies.Actor'>, 'critic': <class 'stable_baselines3.common.policies.ContinuousCritic'>, 'critic_target': <class 'stable_baselines3.common.policies.ContinuousCritic'>}", "__doc__": "\n Policy class (with both actor and critic) for SAC.\n\n :param observation_space: Observation space\n :param action_space: Action space\n :param lr_schedule: Learning rate schedule (could be constant)\n :param net_arch: The specification of the policy and value networks.\n :param activation_fn: Activation function\n :param use_sde: Whether to use State Dependent Exploration or not\n :param log_std_init: Initial value for the log standard deviation\n :param use_expln: Use ``expln()`` function instead of ``exp()`` when using gSDE to ensure\n a positive standard deviation (cf paper). It allows to keep variance\n above zero and prevent it from growing too fast. In practice, ``exp()`` is usually enough.\n :param clip_mean: Clip the mean output when using gSDE to avoid numerical instability.\n :param features_extractor_class: Features extractor to use.\n :param features_extractor_kwargs: Keyword arguments\n to pass to the features extractor.\n :param normalize_images: Whether to normalize images or not,\n dividing by 255.0 (True by default)\n :param optimizer_class: The optimizer to use,\n ``th.optim.Adam`` by default\n :param optimizer_kwargs: Additional keyword arguments,\n excluding the learning rate, to pass to the optimizer\n :param n_critics: Number of critic networks to create.\n :param share_features_extractor: Whether to share or not the features extractor\n between the actor and the critic (this saves computation time)\n ", "__init__": "<function SACPolicy.__init__ at 0x11d347240>", "_build": "<function SACPolicy._build at 0x11d347880>", "_get_constructor_parameters": "<function SACPolicy._get_constructor_parameters at 0x11d347920>", "reset_noise": "<function SACPolicy.reset_noise at 0x11d3479c0>", "make_actor": "<function SACPolicy.make_actor at 0x11d347a60>", "make_critic": "<function SACPolicy.make_critic at 0x11d347b00>", "forward": "<function SACPolicy.forward at 0x11d347ba0>", "_predict": "<function SACPolicy._predict at 0x11d347c40>", "set_training_mode": "<function SACPolicy.set_training_mode at 0x11d347ce0>", "__abstractmethods__": "frozenset()", "_abc_impl": "<_abc._abc_data object at 0x11d360340>"}, "verbose": 0, "policy_kwargs": {"use_sde": false}, "num_timesteps": 890000, "_total_timesteps": 1000000, "_num_timesteps_at_start": 0, "seed": null, "action_noise": null, "start_time": 1718033144906360391, "learning_rate": 0.0003, "tensorboard_log": null, "_last_obs": {":type:": "<class 'numpy.ndarray'>", ":serialized:": "gAWV/QAAAAAAAACMEm51bXB5LmNvcmUubnVtZXJpY5SMC19mcm9tYnVmZmVylJOUKJaIAAAAAAAAAHMeCHpB3PI/DFpikkrE4j8qMOIo+F7Fv8aKck+w3IY/rp5m/LHE6j9Htdk8ZtmWP61/3IvcCJc/m0EKYuA53b9yAYGcVPkIQLgfQEmWTNk/xMUBkpXG4j+LuZAKQZ0XQG/Dq8BeecI/cqu4R66357+Z1/0YARr0v0GQbNN/7NW/vrwsvangEsCUjAVudW1weZSMBWR0eXBllJOUjAJmOJSJiIeUUpQoSwOMATyUTk5OSv////9K/////0sAdJRiSwFLEYaUjAFDlHSUUpQu"}, "_last_episode_starts": {":type:": "<class 'numpy.ndarray'>", ":serialized:": "gAWVdAAAAAAAAACMEm51bXB5LmNvcmUubnVtZXJpY5SMC19mcm9tYnVmZmVylJOUKJYBAAAAAAAAAAGUjAVudW1weZSMBWR0eXBllJOUjAJiMZSJiIeUUpQoSwOMAXyUTk5OSv////9K/////0sAdJRiSwGFlIwBQ5R0lFKULg=="}, 
"_last_original_obs": {":type:": "<class 'numpy.ndarray'>", ":serialized:": "gAWV/QAAAAAAAACMEm51bXB5LmNvcmUubnVtZXJpY5SMC19mcm9tYnVmZmVylJOUKJaIAAAAAAAAALb6nhNC0fI/s5BKsgx84j+is/BK+/jLv9Gbb7kjDYU/qWMcs9L86j8rHsvCXQGeP/2GmAmYAJo/BRntIk4r2789d/dAYS4KQK9pFCZOr9E/ezdTFZfH+T+t7jVzXfAbQCPSSUy6RbA/aLNsJ+Vj77+ZJ02Q5gvgv/wzhrykGNi/UHY3zgdyCsCUjAVudW1weZSMBWR0eXBllJOUjAJmOJSJiIeUUpQoSwOMATyUTk5OSv////9K/////0sAdJRiSwFLEYaUjAFDlHSUUpQu"}, "_episode_num": 1910, "use_sde": false, "sde_sample_freq": -1, "_current_progress_remaining": 0.11000100000000002, "_stats_window_size": 100, "ep_info_buffer": {":type:": "<class 'collections.deque'>", ":serialized:": "gAWVOwwAAAAAAACMC2NvbGxlY3Rpb25zlIwFZGVxdWWUk5QpS2SGlFKUKH2UKIwBcpRHQK/hf52Qnx+MAWyUTegDjAF0lEdAt43z+irT6XV9lChoBkdArypDOX3QD2gHTegDaAhHQLeVGLNfPX11fZQoaAZHQK9Akk/KQq9oB03oA2gIR0C3nDj3mFJydX2UKGgGR0CvQWnqu8sdaAdN6ANoCEdAt6Ndz5oGp3V9lChoBkdAryHms5n14GgHTegDaAhHQLeqfALiMpB1fZQoaAZHQLAZD6RQrMFoB03oA2gIR0C3tOB4yGi6dX2UKGgGR0CvH9oO6NEPaAdN6ANoCEdAt7wEsiB5HHV9lChoBkdAr+VsVvddmmgHTegDaAhHQLfDIj+aScN1fZQoaAZHQK9kvat9x6xoB03oA2gIR0C3yjz544ZNdX2UKGgGR0Cv0Grb5/LDaAdN6ANoCEdAt9FarzXjEXV9lChoBkdArxFVHavicWgHTegDaAhHQLfb5FDOTq11fZQoaAZHQK9w4jOcDr9oB03oA2gIR0C34wa11GLDdX2UKGgGR0Cum8RQaaTfaAdN6ANoCEdAt+op2FFlTXV9lChoBkdAr2v+qYJE6WgHTegDaAhHQLfxSHaN+9d1fZQoaAZHQK/0raakRBhoB03oA2gIR0C3+G5Grjo7dX2UKGgGR0CKp/qEeyRkaAdNBQFoCEdAt/pH3L3bmHV9lChoBkdArweUkY4yXWgHTegDaAhHQLgEqYao/A11fZQoaAZHQK/gplMh5gRoB03oA2gIR0C4C8PfTCtSdX2UKGgGR0CvfFKrBCUpaAdN6ANoCEdAuBLchA4XGnV9lChoBkdAr+BTY9Pk72gHTegDaAhHQLgaAdadMCd1fZQoaAZHQK3Jm8kleGBoB03oA2gIR0C4IRs5Ke05dX2UKGgGR0CvLJEOZssQaAdN6ANoCEdAuCuQdGRV63V9lChoBkdAriL/MyJsPGgHTegDaAhHQLgytr8iwB51fZQoaAZHQK7MQ2y9mHxoB03oA2gIR0C4OeNxp+MIdX2UKGgGR0Cu7j9HlOoHaAdN6ANoCEdAuEED6guh9XV9lChoBkdArxA3N3W4E2gHTegDaAhHQLhIH1K5Cnh1fZQoaAZHQK8PM2hqTKVoB03oA2gIR0C4UowzYVZcdX2UKGgGR0CwCiIOpbUxaAdN6ANoCEdAuFmmrT6SDHV9lChoBkdArzIhyQxN7GgHTegDaAhHQLhgw+kP+XJ1fZQoaAZHQK4kzhQWN3poB03oA2gIR0C4Z+BtYSxrdX2UKGgGR0Ct90hCMPz4aAdN6ANoCEdAuG76mvW6LHV9lChoBkdAr4EUgdOqN2gHTegDaAhHQLh5f5Jbt7d1fZQoaAZHQK9jH+FUQ05oB03oA2gIR0C4gKtXPqs2dX2UKGgGR0CwBun/giu/aAdN6ANoCEdAuIfRinYQKHV9lChoBkdAr27Q+OfdymgHTegDaAhHQLiPBXXRPXV1fZQoaAZHQK+MoAAhje9oB03oA2gIR0C4liAbp/wzdX2UKGgGR0Cvl48CgbqAaAdN6ANoCEdAuKCBbGFSKnV9lChoBkdAr/EsBIWgvmgHTegDaAhHQLinn96kZaV1fZQoaAZHQK/VOE/SpitoB03oA2gIR0C4rsUs4DLbdX2UKGgGR0CwOCXbdrO8aAdN6ANoCEdAuLXh0aIeo3V9lChoBkdAsCt/p3X7L2gHTegDaAhHQLi9AmHP/rB1fZQoaAZHQK8xE1n/T9doB03oA2gIR0C4x2/n8sMBdX2UKGgGR0CwQVEU9IPLaAdN6ANoCEdAuM6o9/z8QHV9lChoBkdArsO5YDDCQGgHTegDaAhHQLjVyU34sVd1fZQoaAZHQK8oVHG0eEJoB03oA2gIR0C43O26bvw3dX2UKGgGR0Cv7dLi++M7aAdN6ANoCEdAuOQKjBVMmHV9lChoBkdAg4KoUahpQGgHTQMBaAhHQLjpNB1LamJ1fZQoaAZHQK/LbUn5SFZoB03oA2gIR0C48FLidat+dX2UKGgGR0CvpiTk6tDEaAdN6ANoCEdAuPdxnrY5DXV9lChoBkdAsBID7di2D2gHTegDaAhHQLj+jRDCxeN1fZQoaAZHQK9pJBbfP5ZoB03oA2gIR0C5Baqvq1PWdX2UKGgGR0CwEsV4Pf8/aAdN6ANoCEdAuRAguOCGvnV9lChoBkdAr42e/JvHcWgHTegDaAhHQLkXSinYQJ51fZQoaAZHQK8vH7gsK9hoB03oA2gIR0C5HnLtmcvvdX2UKGgGR0CvQ76e5Fw2aAdN6ANoCEdAuSWhvMr3CnV9lChoBkdArzq4evIOpmgHTegDaAhHQLksv0163RZ1fZQoaAZHQK+PsFRpDeFoB03oA2gIR0C5NoPykKu0dX2UKGgGR0CwFWozi0fHaAdN6ANoCEdAuT2gu7HyVnV9lChoBkdAYJfD1GsmwGgHS01oCEdAuT4siml67nV9lChoBkdAYtrjCpFTemgHS1poCEdAuT7PspoboHV9lChoBkdAdxL7YTTOPmgHS4hoCEdAuT/HLgXMyXV9lChoBkdAsAaVXFLnLmgHTegDaAhHQLlG7RzRx951fZQoaAZHQK/mTW8yvcJoB03oA2gIR0C5TgVea8YidX2UKGgGR0CvTykE1VHXaAdN6ANoCEdAuVUgJTl1bXV9lChoBkdArrkDvuw5emgHTegDaAhHQLle+3vhIe51fZQoaAZHQK/WG6TW5H5oB03oA2gIR0C5ZjFbzK9xdX2UKGgGR0BnlZmZmZmaaAdLVmgIR0C5Zs6QFLWadX2UKGgGR0CwN4hKxs2vaAdN6ANoCEdAuW3vEn9ehXV9lChoBkdAsBUlLBbfQGgHTegDaAhHQLl
1D5hBqsV1fZQoaAZHQK+N2P7vXshoB03oA2gIR0C5fCsuzyBkdX2UKGgGR0Cu1Zdl2/zraAdN6ANoCEdAuYRcoqkM1HV9lChoBkdAr+QdRpDeCWgHTegDaAhHQLmLd0XgtOF1fZQoaAZHQK8YMp0fYBhoB03oA2gIR0C5kpG/8EV4dX2UKGgGR0Cvtz7aAWi2aAdN6ANoCEdAuZmsqe9SM3V9lChoBkdAr27atV7x/mgHTegDaAhHQLmg6atcOb11fZQoaAZHQLAXkdkrf+FoB03oA2gIR0C5q1kmtyPudX2UKGgGR0CwN57iADq4aAdN6ANoCEdAubKATHsC1nV9lChoBkdAr2r16HCXQmgHTegDaAhHQLm5prhisn11fZQoaAZHQK83tpfx+a1oB03oA2gIR0C5wNKgh8pkdX2UKGgGR0CvZKhLXcxkaAdN6ANoCEdAucfz961LJ3V9lChoBkdAsDM+9Jz1b2gHTegDaAhHQLnSVpc5bQl1fZQoaAZHQK/BjPPcBU9oB03oA2gIR0C52XKtga3rdX2UKGgGR0CwK8dWluWKaAdN6ANoCEdAueCVbW3BpHV9lChoBkdAsAY6KEWZZ2gHTegDaAhHQLnnr47zTWp1fZQoaAZHQGTqrh73PAxoB0tUaAhHQLnoSAh0Qsh1fZQoaAZHQFYEpSrHU+doB0tAaAhHQLnovKaG5+Z1fZQoaAZHQK/ytcBU70ZoB03oA2gIR0C579pGjKxLdX2UKGgGR0Cv6NNO/L1VaAdN6ANoCEdAufpXPfKp1nV9lChoBkdAVKM68xsVL2gHSz9oCEdAufrJ0uDjBHV9lChoBkdAsFGwEC/47GgHTegDaAhHQLoB8ix3V091fZQoaAZHQKD+lErGza9oB00yAmgIR0C6BfeZ5Rj0dX2UKGgGR0Cv2D2TxG2DaAdN6ANoCEdAug0ZHd43WHV9lChoBkdAsD3WT3Zf2WgHTegDaAhHQLoUPPqLS/l1fZQoaAZHQLBvfVy3kPtoB03oA2gIR0C6HkkWIoE0dX2UKGgGR0Cv32vfKp1iaAdN6ANoCEdAuiVmp5u63HV9lChoBkdAdGo5+H8CP2gHS35oCEdAuiZMFGG21HV9lChoBkdAdBKUKiO/+WgHS35oCEdAuicxdjXnQ3V9lChoBkdAsDHm4ZuQ62gHTegDaAhHQLouTO+qR2d1fZQoaAZHQK/2tg75mAdoB03oA2gIR0C6NXMFINExdX2UKGgGR0CwUWMgMc6vaAdN6ANoCEdAujyOkzoECHVlLg=="}, "ep_success_buffer": {":type:": "<class 'collections.deque'>", ":serialized:": "gAWVIAAAAAAAAACMC2NvbGxlY3Rpb25zlIwFZGVxdWWUk5QpS2SGlFKULg=="}, "_n_updates": 879999, "buffer_size": 1000000, "batch_size": 256, "learning_starts": 10000, "tau": 0.005, "gamma": 0.99, "gradient_steps": 1, "optimize_memory_usage": false, "replay_buffer_class": {":type:": "<class 'abc.ABCMeta'>", ":serialized:": "gAWVNQAAAAAAAACMIHN0YWJsZV9iYXNlbGluZXMzLmNvbW1vbi5idWZmZXJzlIwMUmVwbGF5QnVmZmVylJOULg==", "__module__": "stable_baselines3.common.buffers", "__annotations__": "{'observations': <class 'numpy.ndarray'>, 'next_observations': <class 'numpy.ndarray'>, 'actions': <class 'numpy.ndarray'>, 'rewards': <class 'numpy.ndarray'>, 'dones': <class 'numpy.ndarray'>, 'timeouts': <class 'numpy.ndarray'>}", "__doc__": "\n Replay buffer used in off-policy algorithms like SAC/TD3.\n\n :param buffer_size: Max number of element in the buffer\n :param observation_space: Observation space\n :param action_space: Action space\n :param device: PyTorch device\n :param n_envs: Number of parallel environments\n :param optimize_memory_usage: Enable a memory efficient variant\n of the replay buffer which reduces by almost a factor two the memory used,\n at a cost of more complexity.\n See https://github.com/DLR-RM/stable-baselines3/issues/37#issuecomment-637501195\n and https://github.com/DLR-RM/stable-baselines3/pull/28#issuecomment-637559274\n Cannot be used in combination with handle_timeout_termination.\n :param handle_timeout_termination: Handle timeout termination (due to timelimit)\n separately and treat the task as infinite horizon task.\n https://github.com/DLR-RM/stable-baselines3/issues/284\n ", "__init__": "<function ReplayBuffer.__init__ at 0x11d2c00e0>", "add": "<function ReplayBuffer.add at 0x11d2c0220>", "sample": "<function ReplayBuffer.sample at 0x11d2c02c0>", "_get_samples": "<function ReplayBuffer._get_samples at 0x11d2c0360>", "_maybe_cast_dtype": "<staticmethod(<function ReplayBuffer._maybe_cast_dtype at 0x11d2c0400>)>", "__abstractmethods__": "frozenset()", "_abc_impl": "<_abc._abc_data object at 0x11d2b8bc0>"}, "replay_buffer_kwargs": {}, "train_freq": {":type:": "<class 'stable_baselines3.common.type_aliases.TrainFreq'>", ":serialized:": 
"gAWVYQAAAAAAAACMJXN0YWJsZV9iYXNlbGluZXMzLmNvbW1vbi50eXBlX2FsaWFzZXOUjAlUcmFpbkZyZXGUk5RLAWgAjBJUcmFpbkZyZXF1ZW5jeVVuaXSUk5SMBHN0ZXCUhZRSlIaUgZQu"}, "use_sde_at_warmup": false, "target_entropy": -6.0, "ent_coef": "auto", "target_update_interval": 1, "observation_space": {":type:": "<class 'gymnasium.spaces.box.Box'>", ":serialized:": "gAWVsQIAAAAAAACMFGd5bW5hc2l1bS5zcGFjZXMuYm94lIwDQm94lJOUKYGUfZQojAVkdHlwZZSMBW51bXB5lIwFZHR5cGWUk5SMAmY4lImIh5RSlChLA4wBPJROTk5K/////0r/////SwB0lGKMDWJvdW5kZWRfYmVsb3eUjBJudW1weS5jb3JlLm51bWVyaWOUjAtfZnJvbWJ1ZmZlcpSTlCiWEQAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAJRoCIwCYjGUiYiHlFKUKEsDjAF8lE5OTkr/////Sv////9LAHSUYksRhZSMAUOUdJRSlIwNYm91bmRlZF9hYm92ZZRoESiWEQAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAJRoFUsRhZRoGXSUUpSMBl9zaGFwZZRLEYWUjANsb3eUaBEologAAAAAAAAAAAAAAAAA8P8AAAAAAADw/wAAAAAAAPD/AAAAAAAA8P8AAAAAAADw/wAAAAAAAPD/AAAAAAAA8P8AAAAAAADw/wAAAAAAAPD/AAAAAAAA8P8AAAAAAADw/wAAAAAAAPD/AAAAAAAA8P8AAAAAAADw/wAAAAAAAPD/AAAAAAAA8P8AAAAAAADw/5RoC0sRhZRoGXSUUpSMBGhpZ2iUaBEologAAAAAAAAAAAAAAAAA8H8AAAAAAADwfwAAAAAAAPB/AAAAAAAA8H8AAAAAAADwfwAAAAAAAPB/AAAAAAAA8H8AAAAAAADwfwAAAAAAAPB/AAAAAAAA8H8AAAAAAADwfwAAAAAAAPB/AAAAAAAA8H8AAAAAAADwfwAAAAAAAPB/AAAAAAAA8H8AAAAAAADwf5RoC0sRhZRoGXSUUpSMCGxvd19yZXBylIwELWluZpSMCWhpZ2hfcmVwcpSMA2luZpSMCl9ucF9yYW5kb22UTnViLg==", "dtype": "float64", "bounded_below": "[False False False False False False False False False False False False\n False False False False False]", "bounded_above": "[False False False False False False False False False False False False\n False False False False False]", "_shape": [17], "low": "[-inf -inf -inf -inf -inf -inf -inf -inf -inf -inf -inf -inf -inf -inf\n -inf -inf -inf]", "high": "[inf inf inf inf inf inf inf inf inf inf inf inf inf inf inf inf inf]", "low_repr": "-inf", "high_repr": "inf", "_np_random": null}, "action_space": {":type:": "<class 'gymnasium.spaces.box.Box'>", ":serialized:": "gAWVgAIAAAAAAACMFGd5bW5hc2l1bS5zcGFjZXMuYm94lIwDQm94lJOUKYGUfZQojAVkdHlwZZSMBW51bXB5lIwFZHR5cGWUk5SMAmY0lImIh5RSlChLA4wBPJROTk5K/////0r/////SwB0lGKMDWJvdW5kZWRfYmVsb3eUjBJudW1weS5jb3JlLm51bWVyaWOUjAtfZnJvbWJ1ZmZlcpSTlCiWBgAAAAAAAAABAQEBAQGUaAiMAmIxlImIh5RSlChLA4wBfJROTk5K/////0r/////SwB0lGJLBoWUjAFDlHSUUpSMDWJvdW5kZWRfYWJvdmWUaBEolgYAAAAAAAAAAQEBAQEBlGgVSwaFlGgZdJRSlIwGX3NoYXBllEsGhZSMA2xvd5RoESiWGAAAAAAAAAAAAIC/AACAvwAAgL8AAIC/AACAvwAAgL+UaAtLBoWUaBl0lFKUjARoaWdolGgRKJYYAAAAAAAAAAAAgD8AAIA/AACAPwAAgD8AAIA/AACAP5RoC0sGhZRoGXSUUpSMCGxvd19yZXBylIwELTEuMJSMCWhpZ2hfcmVwcpSMAzEuMJSMCl9ucF9yYW5kb22UjBRudW1weS5yYW5kb20uX3BpY2tsZZSMEF9fZ2VuZXJhdG9yX2N0b3KUk5SMBVBDRzY0lGgyjBRfX2JpdF9nZW5lcmF0b3JfY3RvcpSTlIaUUpR9lCiMDWJpdF9nZW5lcmF0b3KUjAVQQ0c2NJSMBXN0YXRllH2UKGg9ihGgqc+RHhM5RJ1rPpIAh6CHAIwDaW5jlIoRXUu8TbDMELdjJ9O9GwRqgwB1jApoYXNfdWludDMylEsAjAh1aW50ZWdlcpRLAHVidWIu", "dtype": "float32", "bounded_below": "[ True True True True True True]", "bounded_above": "[ True True True True True True]", "_shape": [6], "low": "[-1. -1. -1. -1. -1. -1.]", "high": "[1. 1. 1. 1. 1. 
1.]", "low_repr": "-1.0", "high_repr": "1.0", "_np_random": "Generator(PCG64)"}, "n_envs": 1, "lr_schedule": {":type:": "<class 'function'>", ":serialized:": "gAWV6gMAAAAAAACMF2Nsb3VkcGlja2xlLmNsb3VkcGlja2xllIwOX21ha2VfZnVuY3Rpb26Uk5QoaACMDV9idWlsdGluX3R5cGWUk5SMCENvZGVUeXBllIWUUpQoSwFLAEsASwFLBUsTQyaVAZcAdAEAAAAAAAAAAAIAiQF8AKsBAAAAAAAAqwEAAAAAAABTAJROhZSMBWZsb2F0lIWUjBJwcm9ncmVzc19yZW1haW5pbmeUhZSMXi9Vc2Vycy9qcmVuL2FuYWNvbmRhMy9lbnZzLzQ3NTYvbGliL3B5dGhvbjMuMTIvc2l0ZS1wYWNrYWdlcy9zdGFibGVfYmFzZWxpbmVzMy9jb21tb24vdXRpbHMucHmUjAg8bGFtYmRhPpSMIWdldF9zY2hlZHVsZV9mbi48bG9jYWxzPi48bGFtYmRhPpRLYUMS+IAApGWpTtA7TdMsTtMmT4AAlEMAlIwOdmFsdWVfc2NoZWR1bGWUhZQpdJRSlH2UKIwLX19wYWNrYWdlX1+UjBhzdGFibGVfYmFzZWxpbmVzMy5jb21tb26UjAhfX25hbWVfX5SMHnN0YWJsZV9iYXNlbGluZXMzLmNvbW1vbi51dGlsc5SMCF9fZmlsZV9flIxeL1VzZXJzL2pyZW4vYW5hY29uZGEzL2VudnMvNDc1Ni9saWIvcHl0aG9uMy4xMi9zaXRlLXBhY2thZ2VzL3N0YWJsZV9iYXNlbGluZXMzL2NvbW1vbi91dGlscy5weZR1Tk5oAIwQX21ha2VfZW1wdHlfY2VsbJSTlClSlIWUdJRSlGgAjBJfZnVuY3Rpb25fc2V0c3RhdGWUk5RoI32UfZQoaBpoD4wMX19xdWFsbmFtZV9flGgQjA9fX2Fubm90YXRpb25zX1+UfZSMDl9fa3dkZWZhdWx0c19flE6MDF9fZGVmYXVsdHNfX5ROjApfX21vZHVsZV9flGgbjAdfX2RvY19flE6MC19fY2xvc3VyZV9flGgAjApfbWFrZV9jZWxslJOUaAIoaAcoSwFLAEsASwFLAUsTQwiVAZcAiQFTAJRoCSmMAV+UhZRoDowEZnVuY5SMGWNvbnN0YW50X2ZuLjxsb2NhbHM+LmZ1bmOUS4VDCPiAANgPEogKlGgSjAN2YWyUhZQpdJRSlGgXTk5oHylSlIWUdJRSlGglaD99lH2UKGgaaDVoKGg2aCl9lGgrTmgsTmgtaBtoLk5oL2gxRz8zqSowVTJhhZRSlIWUjBdfY2xvdWRwaWNrbGVfc3VibW9kdWxlc5RdlIwLX19nbG9iYWxzX1+UfZR1hpSGUjCFlFKUhZRoRl2UaEh9lHWGlIZSMC4="}, "batch_norm_stats": [], "batch_norm_stats_target": [], "system_info": {"OS": "macOS-14.4.1-arm64-arm-64bit Darwin Kernel Version 23.4.0: Fri Mar 15 00:12:41 PDT 2024; root:xnu-10063.101.17~1/RELEASE_ARM64_T8103", "Python": "3.12.3", "Stable-Baselines3": "2.3.2", "PyTorch": "2.3.1", "GPU Enabled": "False", "Numpy": "1.26.4", "Cloudpickle": "3.0.0", "Gymnasium": "0.29.1"}}
replay.mp4 ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:b6402fcb5d8868f282c384bd028f492ec1164ce396ea833e769870973a22c515
+ size 1203744
results.json ADDED
@@ -0,0 +1 @@
+ {"mean_reward": 4201.8990722, "std_reward": 62.22525107252176, "is_deterministic": true, "n_eval_episodes": 10, "eval_datetime": "2024-06-10T12:46:01.390393"}