araffin committed on
Commit
3e66064
1 Parent(s): 8f4d2fb

Initial Commit

Files changed (4)
  1. README.md +20 -0
  2. dqn-MountainCar-v0.zip +1 -1
  3. dqn-MountainCar-v0/data +13 -13
  4. results.json +1 -1
README.md CHANGED
@@ -32,6 +32,8 @@ with hyperparameter optimization and pre-trained agents included.
  ## Usage (with SB3 RL Zoo)
 
  RL Zoo: https://github.com/DLR-RM/rl-baselines3-zoo
+ SB3: https://github.com/DLR-RM/stable-baselines3
+ SB3 Contrib: https://github.com/Stable-Baselines-Team/stable-baselines3-contrib
 
  ```
  # Download model and save it into the logs/ folder
@@ -45,3 +47,21 @@ python train.py --algo dqn --env MountainCar-v0 -f logs/
  # Upload the model and generate video (when possible)
  python -m utils.push_to_hub --algo dqn --env MountainCar-v0 -f logs/ -orga sb3
  ```
+
+ ## Hyperparameters
+ ```python
+ OrderedDict([('batch_size', 128),
+              ('buffer_size', 10000),
+              ('exploration_final_eps', 0.07),
+              ('exploration_fraction', 0.2),
+              ('gamma', 0.98),
+              ('gradient_steps', 8),
+              ('learning_rate', 0.004),
+              ('learning_starts', 1000),
+              ('n_timesteps', 120000.0),
+              ('policy', 'MlpPolicy'),
+              ('policy_kwargs', 'dict(net_arch=[256, 256])'),
+              ('target_update_interval', 600),
+              ('train_freq', 16),
+              ('normalize', False)])
+ ```
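Note: the hyperparameters added above are RL Zoo config entries rather than raw constructor arguments. As a rough illustration (not the exact code the zoo generates), they would map onto an SB3 `DQN` roughly as below; `n_timesteps` is the training budget passed to `learn()` and `normalize: False` simply means no `VecNormalize` wrapper is applied.

```python
# Illustrative sketch only: approximate SB3 equivalent of the RL Zoo config above.
import gym  # SB3 versions contemporary with this model use gym; newer SB3 expects gymnasium
from stable_baselines3 import DQN

env = gym.make("MountainCar-v0")

model = DQN(
    "MlpPolicy",
    env,
    learning_rate=0.004,
    buffer_size=10_000,
    learning_starts=1_000,
    batch_size=128,
    gamma=0.98,
    train_freq=16,
    gradient_steps=8,
    target_update_interval=600,
    exploration_fraction=0.2,
    exploration_final_eps=0.07,
    policy_kwargs=dict(net_arch=[256, 256]),
    verbose=1,
)

# 'n_timesteps' from the config is the training budget, not a DQN argument.
model.learn(total_timesteps=120_000)
model.save("dqn-MountainCar-v0")
```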
dqn-MountainCar-v0.zip CHANGED
@@ -1,3 +1,3 @@
  version https://git-lfs.github.com/spec/v1
- oid sha256:c5d02d795e9120efb19cd1f65541a7f040eacf9b4253bfd438ebc35b64113a14
+ oid sha256:cc7f7861b17461282de2b85694d9ca48e97a97c650513acf42021f70182f56db
  size 1103767
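The .zip entry is tracked with Git LFS, so the diff shows only the pointer file: the `oid sha256:` line is the checksum of the real archive and `size` its byte count; here only the checksum changes (the model was re-saved) while the size stays 1103767. A small sketch, assuming a local copy of the downloaded archive, for checking that it matches the new pointer:

```python
# Sketch: verify a locally downloaded archive against the LFS pointer (local path assumed).
import hashlib
from pathlib import Path

path = Path("dqn-MountainCar-v0.zip")  # assumed download location

digest = hashlib.sha256(path.read_bytes()).hexdigest()
print(digest)                # expect cc7f7861b17461282de2b85694d9ca48e97a97c650513acf42021f70182f56db
print(path.stat().st_size)   # expect 1103767
```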
dqn-MountainCar-v0/data CHANGED
@@ -4,15 +4,15 @@
  ":serialized:": "gAWVMAAAAAAAAACMHnN0YWJsZV9iYXNlbGluZXMzLmRxbi5wb2xpY2llc5SMCURRTlBvbGljeZSTlC4=",
  "__module__": "stable_baselines3.dqn.policies",
  "__doc__": "\n Policy class with Q-Value Net and target net for DQN\n\n :param observation_space: Observation space\n :param action_space: Action space\n :param lr_schedule: Learning rate schedule (could be constant)\n :param net_arch: The specification of the policy and value networks.\n :param activation_fn: Activation function\n :param features_extractor_class: Features extractor to use.\n :param features_extractor_kwargs: Keyword arguments\n to pass to the features extractor.\n :param normalize_images: Whether to normalize images or not,\n dividing by 255.0 (True by default)\n :param optimizer_class: The optimizer to use,\n ``th.optim.Adam`` by default\n :param optimizer_kwargs: Additional keyword arguments,\n excluding the learning rate, to pass to the optimizer\n ",
- "__init__": "<function DQNPolicy.__init__ at 0x7fd067581b00>",
- "_build": "<function DQNPolicy._build at 0x7fd067581b90>",
- "make_q_net": "<function DQNPolicy.make_q_net at 0x7fd067581c20>",
- "forward": "<function DQNPolicy.forward at 0x7fd067581cb0>",
- "_predict": "<function DQNPolicy._predict at 0x7fd067581d40>",
- "_get_constructor_parameters": "<function DQNPolicy._get_constructor_parameters at 0x7fd067581dd0>",
- "set_training_mode": "<function DQNPolicy.set_training_mode at 0x7fd067581e60>",
+ "__init__": "<function DQNPolicy.__init__ at 0x7f8b5764cb00>",
+ "_build": "<function DQNPolicy._build at 0x7f8b5764cb90>",
+ "make_q_net": "<function DQNPolicy.make_q_net at 0x7f8b5764cc20>",
+ "forward": "<function DQNPolicy.forward at 0x7f8b5764ccb0>",
+ "_predict": "<function DQNPolicy._predict at 0x7f8b5764cd40>",
+ "_get_constructor_parameters": "<function DQNPolicy._get_constructor_parameters at 0x7f8b5764cdd0>",
+ "set_training_mode": "<function DQNPolicy.set_training_mode at 0x7f8b5764ce60>",
  "__abstractmethods__": "frozenset()",
- "_abc_impl": "<_abc_data object at 0x7fd0675794e0>"
+ "_abc_impl": "<_abc_data object at 0x7f8b576444e0>"
  },
  "verbose": 1,
  "policy_kwargs": {
@@ -89,12 +89,12 @@
  ":serialized:": "gAWVNQAAAAAAAACMIHN0YWJsZV9iYXNlbGluZXMzLmNvbW1vbi5idWZmZXJzlIwMUmVwbGF5QnVmZmVylJOULg==",
  "__module__": "stable_baselines3.common.buffers",
  "__doc__": "\n Replay buffer used in off-policy algorithms like SAC/TD3.\n\n :param buffer_size: Max number of element in the buffer\n :param observation_space: Observation space\n :param action_space: Action space\n :param device:\n :param n_envs: Number of parallel environments\n :param optimize_memory_usage: Enable a memory efficient variant\n of the replay buffer which reduces by almost a factor two the memory used,\n at a cost of more complexity.\n See https://github.com/DLR-RM/stable-baselines3/issues/37#issuecomment-637501195\n and https://github.com/DLR-RM/stable-baselines3/pull/28#issuecomment-637559274\n :param handle_timeout_termination: Handle timeout termination (due to timelimit)\n separately and treat the task as infinite horizon task.\n https://github.com/DLR-RM/stable-baselines3/issues/284\n ",
- "__init__": "<function ReplayBuffer.__init__ at 0x7fd0679f2ef0>",
- "add": "<function ReplayBuffer.add at 0x7fd0679f2f80>",
- "sample": "<function ReplayBuffer.sample at 0x7fd0679e7680>",
- "_get_samples": "<function ReplayBuffer._get_samples at 0x7fd0679e7710>",
+ "__init__": "<function ReplayBuffer.__init__ at 0x7f8b57abeef0>",
+ "add": "<function ReplayBuffer.add at 0x7f8b57abef80>",
+ "sample": "<function ReplayBuffer.sample at 0x7f8b57ab3680>",
+ "_get_samples": "<function ReplayBuffer._get_samples at 0x7f8b57ab3710>",
  "__abstractmethods__": "frozenset()",
- "_abc_impl": "<_abc_data object at 0x7fd067a52480>"
+ "_abc_impl": "<_abc_data object at 0x7f8b57b1d480>"
  },
  "replay_buffer_kwargs": {},
  "train_freq": {
results.json CHANGED
@@ -1 +1 @@
- {"mean_reward": -103.4, "std_reward": 7.4859869088851605, "is_deterministic": true, "n_eval_episodes": 10, "eval_datetime": "2022-05-20T09:54:22.907164"}
+ {"mean_reward": -103.4, "std_reward": 7.4859869088851605, "is_deterministic": true, "n_eval_episodes": 10, "eval_datetime": "2022-05-20T09:58:38.915474"}