pableitorr committed · Commit df29a87 · verified · 1 parent: e2cacfb

Initial commit

README.md CHANGED
@@ -16,7 +16,7 @@ model-index:
       type: SpaceInvadersNoFrameskip-v4
     metrics:
     - type: mean_reward
-      value: 656.00 +/- 153.60
+      value: 587.00 +/- 118.37
       name: mean_reward
       verified: false
 ---
@@ -63,7 +63,7 @@ python -m rl_zoo3.push_to_hub --algo dqn --env SpaceInvadersNoFrameskip-v4 -f lo
 ## Hyperparameters
 ```python
 OrderedDict([('batch_size', 32),
-             ('buffer_size', 100000),
+             ('buffer_size', 10000),
              ('env_wrapper',
              ['stable_baselines3.common.atari_wrappers.AtariWrapper']),
              ('exploration_final_eps', 0.01),
@@ -72,8 +72,8 @@ OrderedDict([('batch_size', 32),
              ('gradient_steps', 1),
              ('learning_rate', 0.0001),
              ('learning_starts', 100000),
-             ('n_timesteps', 1000000.0),
-             ('optimize_memory_usage', False),
+             ('n_timesteps', 10000000.0),
+             ('optimize_memory_usage', True),
              ('policy', 'CnnPolicy'),
              ('target_update_interval', 1000),
              ('train_freq', 4),
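
For context, here is a minimal sketch of how the hyperparameters above could be wired into stable_baselines3 directly. The repository itself is trained through rl_zoo3, which builds the model from config.yml, so this is an approximation rather than the exact training entry point; the env construction via `make_atari_env` and the `replay_buffer_kwargs` workaround for `optimize_memory_usage=True` are assumptions, not part of this commit.

```python
# Approximate stand-alone training setup matching the updated hyperparameters.
# Assumes stable-baselines3 and the Atari dependencies (ale-py + ROMs) are installed.
from stable_baselines3 import DQN
from stable_baselines3.common.env_util import make_atari_env

# make_atari_env applies AtariWrapper, mirroring the env_wrapper entry above.
env = make_atari_env("SpaceInvadersNoFrameskip-v4", n_envs=1, seed=0)

model = DQN(
    "CnnPolicy",
    env,
    batch_size=32,
    buffer_size=10_000,              # lowered from 100_000 in this commit
    learning_rate=1e-4,
    learning_starts=100_000,
    train_freq=4,
    gradient_steps=1,
    target_update_interval=1_000,
    exploration_final_eps=0.01,
    optimize_memory_usage=True,      # enabled in this commit
    # Assumption: recent SB3 releases require disabling timeout handling
    # in the replay buffer when optimize_memory_usage is set.
    replay_buffer_kwargs={"handle_timeout_termination": False},
    verbose=1,
)
model.learn(total_timesteps=10_000_000)  # n_timesteps raised from 1e6 to 1e7
model.save("dqn-SpaceInvadersNoFrameskip-v4")
```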
args.yml CHANGED
@@ -1,34 +1,24 @@
 !!python/object/apply:collections.OrderedDict
 - - - algo
     - dqn
-  - - conf_file
-    - dqn.yml
-  - - device
-    - auto
   - - env
     - SpaceInvadersNoFrameskip-v4
   - - env_kwargs
     - null
-  - - eval_env_kwargs
-    - null
   - - eval_episodes
-    - 5
+    - 10
   - - eval_freq
-    - 25000
+    - 10000
   - - gym_packages
     - []
   - - hyperparams
     - null
   - - log_folder
-    - logs/
+    - rl-trained-agents/
   - - log_interval
     - -1
-  - - max_total_trials
-    - null
-  - - n_eval_envs
-    - 1
   - - n_evaluations
-    - null
+    - 20
   - - n_jobs
     - 1
   - - n_startup_trials
@@ -36,17 +26,11 @@
   - - n_timesteps
     - -1
   - - n_trials
-    - 500
+    - 10
-  - - no_optim_plots
-    - false
   - - num_threads
     - -1
-  - - optimization_log_path
-    - null
   - - optimize_hyperparameters
     - false
-  - - progress
-    - false
   - - pruner
     - median
   - - sampler
@@ -56,28 +40,20 @@
   - - save_replay_buffer
     - false
   - - seed
-    - 2831738852
+    - 234163638
   - - storage
     - null
   - - study_name
     - null
   - - tensorboard_log
     - ''
-  - - track
-    - false
   - - trained_agent
     - ''
   - - truncate_last_trajectory
     - true
   - - uuid
-    - false
+    - true
   - - vec_env
     - dummy
   - - verbose
     - 1
-  - - wandb_entity
-    - null
-  - - wandb_project_name
-    - sb3
-  - - wandb_tags
-    - []
config.yml CHANGED
@@ -2,7 +2,7 @@
 - - - batch_size
     - 32
   - - buffer_size
-    - 100000
+    - 10000
   - - env_wrapper
     - - stable_baselines3.common.atari_wrappers.AtariWrapper
   - - exploration_final_eps
@@ -18,9 +18,9 @@
   - - learning_starts
     - 100000
   - - n_timesteps
-    - 1000000.0
+    - 10000000.0
   - - optimize_memory_usage
-    - false
+    - true
   - - policy
     - CnnPolicy
   - - target_update_interval
dqn-SpaceInvadersNoFrameskip-v4.zip CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:ae9e911bdb14c5e57140ce9b9b813036ac240ddbe7369869f416ca2c77f45846
-size 27220728
+oid sha256:2a993d2fe4bc23ce79d37614226f9f13ddf41316a2bba51cdb9e7f31abd30057
+size 27219385
dqn-SpaceInvadersNoFrameskip-v4/data CHANGED
The diff for this file is too large to render. See raw diff
 
dqn-SpaceInvadersNoFrameskip-v4/policy.optimizer.pth CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:a9f5567f957fb32d4cbd793691a2bedc558d159b900b71557eb90902abcdb5b8
-size 13506172
+oid sha256:1c84b8e885044f7a519b5f2e8d11b729cff094f707153dfc4b355bb7912b4680
+size 13506108
dqn-SpaceInvadersNoFrameskip-v4/policy.pth CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:c3121998f51da46bdcb3f04aed3c3654e71676a1c0274271e68865a2e656569f
+oid sha256:49caa05efa450005dbf9dfb9798c8cbf0e826a937dc985ccbe496e339a219598
 size 13505370
replay.mp4 CHANGED
Binary files a/replay.mp4 and b/replay.mp4 differ
 
results.json CHANGED
@@ -1 +1 @@
-{"mean_reward": 656.0, "std_reward": 153.6033853793594, "is_deterministic": false, "n_eval_episodes": 10, "eval_datetime": "2024-09-10T18:39:29.879012"}
+{"mean_reward": 587.0, "std_reward": 118.36807001890332, "is_deterministic": false, "n_eval_episodes": 10, "eval_datetime": "2024-09-11T18:18:52.841440"}
train_eval_metrics.zip CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:858eae90983b292b74b077ccfb023f4d54554a04c6a5e29f56c135f3a9a0f8f1
-size 36229
+oid sha256:07af25fa3fe753d1954eed59b867f04926ba4027500c905ce1a17dd768dd0142
+size 446800