NicolasYn committed
Commit 028466c
1 Parent(s): f61aeb5

Upload . with huggingface_hub

.summary/0/events.out.tfevents.1712425402.gpu3.enst.fr ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:0b6adb9c3c1e3d4011789058ce6eb0e65d58e750fb18b9bd87ca165979c6bc75
+size 2152
.summary/0/events.out.tfevents.1712425564.gpu3.enst.fr ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:7e17810992dc18c5eeeff992905d660e1b1507c943f1ef20a737c44999d53eee
+size 246869
README.md CHANGED
@@ -1,13 +1,11 @@
 ---
+library_name: sample-factory
 tags:
-- LunarLander-v2
-- ppo
 - deep-reinforcement-learning
 - reinforcement-learning
-- custom-implementation
-- deep-rl-course
+- sample-factory
 model-index:
-- name: PPO
+- name: APPO
   results:
   - task:
       type: reinforcement-learning
@@ -17,45 +15,42 @@ model-index:
       type: LunarLander-v2
     metrics:
     - type: mean_reward
-      value: -64.00 +/- 19.54
+      value: 205.87 +/- 83.70
       name: mean_reward
       verified: false
 ---
 
-# PPO Agent Playing LunarLander-v2
-
-This is a trained model of a PPO agent playing LunarLander-v2.
-
-# Hyperparameters
-```python
-{'exp_name': 'unit8_ppo1'
- 'seed': 1
- 'torch_deterministic': True
- 'cuda': True
- 'track': False
- 'wandb_project_name': 'cleanRL'
- 'wandb_entity': None
- 'capture_video': False
- 'env_id': 'LunarLander-v2'
- 'total_timesteps': 500000
- 'learning_rate': 0.00025
- 'num_envs': 4
- 'num_steps': 128
- 'anneal_lr': True
- 'gae': True
- 'gamma': 0.99
- 'gae_lambda': 0.95
- 'num_minibatches': 4
- 'update_epochs': 4
- 'norm_adv': True
- 'clip_coef': 0.2
- 'clip_vloss': True
- 'ent_coef': 0.01
- 'vf_coef': 0.5
- 'max_grad_norm': 0.5
- 'target_kl': None
- 'repo_id': 'NicolasYn/ppo8-LunarLander-v2'
- 'batch_size': 512
- 'minibatch_size': 128}
-```
-
+An **APPO** model trained on the **LunarLander-v2** environment.
+
+This model was trained using Sample-Factory 2.0: https://github.com/alex-petrenko/sample-factory.
+Documentation for how to use Sample-Factory can be found at https://www.samplefactory.dev/
+
+
+## Downloading the model
+
+After installing Sample-Factory, download the model with:
+```
+python -m sample_factory.huggingface.load_from_hub -r NicolasYn/ppo8-LunarLander-v2
+```
+
+
+## Using the model
+
+To run the model after download, use the `enjoy` script corresponding to this environment:
+```
+python -m <path.to.enjoy.module> --algo=APPO --env=LunarLander-v2 --train_dir=./train_dir --experiment=ppo8-LunarLander-v2
+```
+
+
+You can also upload models to the Hugging Face Hub using the same script with the `--push_to_hub` flag.
+See https://www.samplefactory.dev/10-huggingface/huggingface/ for more details.
+
+## Training with this model
+
+To continue training with this model, use the `train` script corresponding to this environment:
+```
+python -m <path.to.train.module> --algo=APPO --env=LunarLander-v2 --train_dir=./train_dir --experiment=ppo8-LunarLander-v2 --restart_behavior=resume --train_for_env_steps=10000000000
+```
+
+Note: you may have to adjust `--train_for_env_steps` to a suitably high number, as the experiment will resume from the step count at which it concluded.
+
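The `<path.to.enjoy.module>` and `<path.to.train.module>` placeholders above come from the model-card template and are left generic on purpose. As a hedged sketch only: for a plain Gym environment such as LunarLander-v2, the upstream Sample-Factory repository ships example entry points under `sf_examples`; the module names below are an assumption based on that repository, not something recorded in this commit.

```
# Assumption: sf_examples.enjoy_gym_env / sf_examples.train_gym_env are the
# gym-env example entry points bundled with Sample-Factory 2.0; verify the
# names against your installed version before relying on them.
python -m sf_examples.enjoy_gym_env --algo=APPO --env=LunarLander-v2 --train_dir=./train_dir --experiment=ppo8-LunarLander-v2
python -m sf_examples.train_gym_env --algo=APPO --env=LunarLander-v2 --train_dir=./train_dir --experiment=ppo8-LunarLander-v2 --restart_behavior=resume
```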
checkpoint_p0/best_000000392_401408_reward_180.827.pth ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:e347dae9c7730c291232b6ec35b31aba51e63bb5ac6be1733af49137ee55e5d2
+size 22166443
checkpoint_p0/checkpoint_000000929_951296.pth ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:82e3381e063960b28c61f79cd703c81278aba020c731118f0fc991fe511d8e5b
+size 22166773
checkpoint_p0/checkpoint_000000978_1001472.pth ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:016e4060df1b03b5be7051be6323aaea338ce225854a68011b41f85e15bfb0f7
+size 22166773
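The three `.pth` files above are ordinary PyTorch-serialized checkpoints tracked through Git LFS. A minimal sketch for inspecting one locally, assuming only that it deserializes with `torch.load`; the dictionary layout is Sample-Factory internal and not documented in this commit.

```python
import torch

# Sample-Factory checkpoints are torch-serialized objects; weights_only=False
# is needed on newer PyTorch because the pickle may contain non-tensor state.
ckpt = torch.load(
    "checkpoint_p0/best_000000392_401408_reward_180.827.pth",
    map_location="cpu",
    weights_only=False,
)

# Print the top-level structure (which keys exist is an assumption to verify;
# typically model weights, optimizer state, and training progress counters).
if isinstance(ckpt, dict):
    for key in ckpt:
        print(key)
```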
config.json ADDED
@@ -0,0 +1,134 @@
+{
+  "help": false,
+  "algo": "APPO",
+  "env": "LunarLander-v2",
+  "experiment": "default_experiment",
+  "train_dir": "./train_dir_LunarLander-v2",
+  "restart_behavior": "resume",
+  "device": "gpu",
+  "seed": null,
+  "num_policies": 1,
+  "async_rl": true,
+  "serial_mode": false,
+  "batched_sampling": false,
+  "num_batches_to_accumulate": 2,
+  "worker_num_splits": 2,
+  "policy_workers_per_policy": 1,
+  "max_policy_lag": 1000,
+  "num_workers": 8,
+  "num_envs_per_worker": 4,
+  "batch_size": 1024,
+  "num_batches_per_epoch": 1,
+  "num_epochs": 1,
+  "rollout": 32,
+  "recurrence": 32,
+  "shuffle_minibatches": false,
+  "gamma": 0.99,
+  "reward_scale": 1.0,
+  "reward_clip": 1000.0,
+  "value_bootstrap": false,
+  "normalize_returns": true,
+  "exploration_loss_coeff": 0.003,
+  "value_loss_coeff": 0.5,
+  "kl_loss_coeff": 0.0,
+  "exploration_loss": "entropy",
+  "gae_lambda": 0.95,
+  "ppo_clip_ratio": 0.1,
+  "ppo_clip_value": 1.0,
+  "with_vtrace": false,
+  "vtrace_rho": 1.0,
+  "vtrace_c": 1.0,
+  "optimizer": "adam",
+  "adam_eps": 1e-06,
+  "adam_beta1": 0.9,
+  "adam_beta2": 0.999,
+  "max_grad_norm": 4.0,
+  "learning_rate": 0.0001,
+  "lr_schedule": "constant",
+  "lr_schedule_kl_threshold": 0.008,
+  "lr_adaptive_min": 1e-06,
+  "lr_adaptive_max": 0.01,
+  "obs_subtract_mean": 0.0,
+  "obs_scale": 1.0,
+  "normalize_input": true,
+  "normalize_input_keys": null,
+  "decorrelate_experience_max_seconds": 0,
+  "decorrelate_envs_on_one_worker": true,
+  "actor_worker_gpus": [],
+  "set_workers_cpu_affinity": true,
+  "force_envs_single_thread": false,
+  "default_niceness": 0,
+  "log_to_file": false,
+  "experiment_summaries_interval": 10,
+  "flush_summaries_interval": 30,
+  "stats_avg": 100,
+  "summaries_use_frameskip": true,
+  "heartbeat_interval": 20,
+  "heartbeat_reporting_interval": 180,
+  "train_for_env_steps": 1000000,
+  "train_for_seconds": 10000000000,
+  "save_every_sec": 120,
+  "keep_checkpoints": 2,
+  "load_checkpoint_kind": "latest",
+  "save_milestones_sec": -1,
+  "save_best_every_sec": 5,
+  "save_best_metric": "reward",
+  "save_best_after": 100000,
+  "benchmark": false,
+  "encoder_mlp_layers": [
+    512,
+    512
+  ],
+  "encoder_conv_architecture": "convnet_simple",
+  "encoder_conv_mlp_layers": [
+    512
+  ],
+  "use_rnn": true,
+  "rnn_size": 512,
+  "rnn_type": "gru",
+  "rnn_num_layers": 1,
+  "decoder_mlp_layers": [],
+  "nonlinearity": "elu",
+  "policy_initialization": "orthogonal",
+  "policy_init_gain": 1.0,
+  "actor_critic_share_weights": true,
+  "adaptive_stddev": true,
+  "continuous_tanh_scale": 0.0,
+  "initial_stddev": 1.0,
+  "use_env_info_cache": false,
+  "env_gpu_actions": false,
+  "env_gpu_observations": true,
+  "env_frameskip": 1,
+  "env_framestack": 1,
+  "pixel_format": "CHW",
+  "use_record_episode_statistics": false,
+  "with_wandb": false,
+  "wandb_user": null,
+  "wandb_project": "sample_factory",
+  "wandb_group": null,
+  "wandb_job_type": "SF",
+  "wandb_tags": [],
+  "with_pbt": false,
+  "pbt_mix_policies_in_one_env": true,
+  "pbt_period_env_steps": 5000000,
+  "pbt_start_mutation": 20000000,
+  "pbt_replace_fraction": 0.3,
+  "pbt_mutation_rate": 0.15,
+  "pbt_replace_reward_gap": 0.1,
+  "pbt_replace_reward_gap_absolute": 1e-06,
+  "pbt_optimize_gamma": false,
+  "pbt_target_objective": "true_objective",
+  "pbt_perturb_min": 1.1,
+  "pbt_perturb_max": 1.5,
+  "command_line": "--env=LunarLander-v2 --num_workers=8 --num_envs_per_worker=4 --train_for_env_steps=10 --train_dir=./train_dir_LunarLander-v2 --log_to_file=False",
+  "cli_args": {
+    "env": "LunarLander-v2",
+    "train_dir": "./train_dir_LunarLander-v2",
+    "num_workers": 8,
+    "num_envs_per_worker": 4,
+    "log_to_file": false,
+    "train_for_env_steps": 10
+  },
+  "git_hash": "unknown",
+  "git_repo_name": "not a git repository"
+}
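Since config.json is plain JSON, the run's hyperparameters can be inspected without Sample-Factory installed. A minimal standard-library sketch; the key names are taken directly from the file above.

```python
import json

# Load the training configuration committed alongside the checkpoints.
with open("config.json") as f:
    cfg = json.load(f)

# A few of the settings that most affect training behavior (all present above).
for key in ("algo", "env", "batch_size", "rollout", "recurrence",
            "learning_rate", "gamma", "gae_lambda", "train_for_env_steps"):
    print(f"{key}: {cfg[key]}")
```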
git.diff ADDED
File without changes
replay.mp4 CHANGED
Binary files a/replay.mp4 and b/replay.mp4 differ