andrewzhang505 committed on
Commit 922f838
1 parent: c333962

Upload with huggingface_hub

.summary/0/events.out.tfevents.1657653433.andrew-gpu ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:748df02dd3566c234c1865c8baa5bc4dd5eb6d2e16d14baaf86c08b8e74313a4
+ size 13908529
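The TensorBoard event files above are stored as Git LFS pointers rather than raw binaries: each pointer records only the spec version, the SHA-256 of the actual object, and its size in bytes. A minimal sketch of reading such a pointer into a dict (the path is illustrative, taken from this commit):

```python
# Parse a Git LFS pointer file ("key value" per line) into a dict,
# e.g. {"version": "...", "oid": "sha256:...", "size": "13908529"}.
def read_lfs_pointer(path):
    fields = {}
    with open(path) as f:
        for line in f:
            key, _, value = line.strip().partition(" ")
            if key:
                fields[key] = value
    return fields

# Example (path as it appears in this repo):
# info = read_lfs_pointer(".summary/0/events.out.tfevents.1657653433.andrew-gpu")
# print(info["oid"], info["size"])
```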
.summary/0/events.out.tfevents.1657748005.andrew-gpu ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:5b87a967636a7230e0fa9cbedb470794924039215c06b5d9f3f7df06b546a695
+ size 40
README.md ADDED
@@ -0,0 +1,11 @@
+ ---
+ library_name: sample-factory
+ tags:
+ - deep-reinforcement-learning
+ - reinforcement-learning
+ - sample-factory
+ ---
+
+ An **APPO** model trained on the **quadrotor_multi** environment.
+ This model was trained using Sample Factory 2.0: https://github.com/alex-petrenko/sample-factory
+
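To experiment with these files locally, one option is to pull the whole repository snapshot with `huggingface_hub`; LFS-backed files are resolved automatically. The `repo_id` below is an assumption pieced together from the uploader and environment name, not stated anywhere in this commit:

```python
from huggingface_hub import snapshot_download

# Download every file in this model repo to the local cache and return the path.
# The repo_id is hypothetical; substitute the actual "<user>/<repo>" of this upload.
local_dir = snapshot_download(repo_id="andrewzhang505/quadrotor_multi", repo_type="model")
print("files downloaded to:", local_dir)
```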
cfg.json ADDED
@@ -0,0 +1,195 @@
+ {
+ "help": false,
+ "algo": "APPO",
+ "env": "quadrotor_multi",
+ "experiment": "00_baseline_see_0",
+ "train_dir": "./train_dir/quad_single_baseline/baseline_",
+ "restart_behavior": "resume",
+ "device": "gpu",
+ "seed": 0,
+ "num_policies": 1,
+ "async_rl": true,
+ "serial_mode": false,
+ "batched_sampling": false,
+ "num_batches_to_accumulate": 2,
+ "worker_num_splits": 2,
+ "policy_workers_per_policy": 1,
+ "max_policy_lag": 100000000,
+ "num_workers": 36,
+ "num_envs_per_worker": 4,
+ "batch_size": 1024,
+ "num_batches_per_epoch": 1,
+ "num_epochs": 1,
+ "rollout": 128,
+ "recurrence": 1,
+ "shuffle_minibatches": true,
+ "gamma": 0.99,
+ "reward_scale": 1.0,
+ "reward_clip": 1000.0,
+ "value_bootstrap": false,
+ "normalize_returns": false,
+ "exploration_loss_coeff": 0.0,
+ "value_loss_coeff": 0.5,
+ "kl_loss_coeff": 0.0,
+ "exploration_loss": "entropy",
+ "gae_lambda": 1.0,
+ "ppo_clip_ratio": 0.1,
+ "ppo_clip_value": 5.0,
+ "with_vtrace": false,
+ "vtrace_rho": 1.0,
+ "vtrace_c": 1.0,
+ "optimizer": "adam",
+ "adam_eps": 1e-06,
+ "adam_beta1": 0.9,
+ "adam_beta2": 0.999,
+ "max_grad_norm": 5.0,
+ "learning_rate": 0.0001,
+ "lr_schedule": "constant",
+ "lr_schedule_kl_threshold": 0.008,
+ "obs_subtract_mean": 0.0,
+ "obs_scale": 1.0,
+ "normalize_input": false,
+ "decorrelate_experience_max_seconds": 10,
+ "decorrelate_envs_on_one_worker": true,
+ "actor_worker_gpus": [],
+ "set_workers_cpu_affinity": true,
+ "force_envs_single_thread": true,
+ "default_niceness": 0,
+ "experiment_summaries_interval": 60,
+ "stats_avg": 100,
+ "train_for_env_steps": 10000000000,
+ "train_for_seconds": 10000000000,
+ "save_every_sec": 120,
+ "keep_checkpoints": 3,
+ "load_checkpoint_kind": "latest",
+ "save_milestones_sec": 10000,
+ "save_best_every_sec": 5,
+ "save_best_metric": "reward",
+ "save_best_after": 100000,
+ "benchmark": false,
+ "encoder_type": "mlp",
+ "encoder_subtype": "mlp_quads",
+ "encoder_custom": "quad_multi_encoder",
+ "encoder_extra_fc_layers": 0,
+ "hidden_size": 16,
+ "nonlinearity": "tanh",
+ "policy_initialization": "xavier_uniform",
+ "policy_init_gain": 1.0,
+ "actor_critic_share_weights": false,
+ "adaptive_stddev": false,
+ "initial_stddev": 1.0,
+ "use_rnn": false,
+ "rnn_type": "gru",
+ "rnn_num_layers": 1,
+ "env_gpu_actions": false,
+ "env_frameskip": 1,
+ "env_framestack": 4,
+ "pixel_format": "CHW",
+ "with_wandb": true,
+ "wandb_user": null,
+ "wandb_project": "sample_factory",
+ "wandb_group": null,
+ "wandb_job_type": "SF",
+ "wandb_tags": [
+ "sf2"
+ ],
+ "quads_discretize_actions": -1,
+ "quads_clip_input": false,
+ "quads_effort_reward": null,
+ "quads_episode_duration": 15.0,
+ "quads_num_agents": 1,
+ "quads_neighbor_hidden_size": 256,
+ "quads_neighbor_encoder_type": "no_encoder",
+ "quads_collision_reward": 5.0,
+ "quads_collision_obstacle_reward": 0.0,
+ "quads_settle_reward": 0.0,
+ "quads_settle": false,
+ "quads_vel_reward_out_range": 0.8,
+ "quads_settle_range_meters": 1.0,
+ "quads_collision_hitbox_radius": 2.0,
+ "quads_collision_falloff_radius": 4.0,
+ "quads_collision_smooth_max_penalty": 10.0,
+ "neighbor_obs_type": "none",
+ "quads_use_numba": true,
+ "quads_obstacle_mode": "no_obstacles",
+ "quads_obstacle_num": 0,
+ "quads_obstacle_type": "sphere",
+ "quads_obstacle_size": 0.0,
+ "quads_obstacle_traj": "gravity",
+ "quads_local_obs": -1,
+ "quads_local_coeff": 1.0,
+ "quads_local_metric": "dist",
+ "quads_view_mode": "local",
+ "quads_adaptive_env": false,
+ "quads_mode": "mix",
+ "quads_formation": "circle_horizontal",
+ "quads_formation_size": 0.0,
+ "room_dims": [
+ 10,
+ 10,
+ 10
+ ],
+ "quads_obs_repr": "xyz_vxyz_R_omega",
+ "replay_buffer_sample_prob": 0.0,
+ "anneal_collision_steps": 0.0,
+ "quads_obstacle_obs_mode": "relative",
+ "quads_obstacle_hidden_size": 32,
+ "quads_collision_obst_smooth_max_penalty": 10.0,
+ "quads_obst_penalty_fall_off": 10.0,
+ "use_spectral_norm": false,
+ "command_line": "--env=quadrotor_multi --train_for_env_steps=1000000000 --algo=APPO --use_rnn=False --num_workers=36 --num_envs_per_worker=4 --learning_rate=0.0001 --ppo_clip_value=5.0 --recurrence=1 --nonlinearity=tanh --actor_critic_share_weights=False --policy_initialization=xavier_uniform --adaptive_stddev=False --with_vtrace=False --max_policy_lag=100000000 --hidden_size=256 --gae_lambda=1.00 --max_grad_norm=5.0 --exploration_loss_coeff=0.0 --rollout=128 --batch_size=1024 --quads_use_numba=True --quads_mode=mix --quads_episode_duration=15.0 --quads_formation_size=0.0 --encoder_custom=quad_multi_encoder --quads_collision_reward=5.0 --quads_neighbor_hidden_size=256 --neighbor_obs_type=pos_vel --quads_settle_reward=0.0 --quads_collision_hitbox_radius=2.0 --quads_collision_falloff_radius=4.0 --quads_local_obs=6 --quads_local_metric=dist --quads_local_coeff=1.0 --quads_num_agents=8 --quads_collision_reward=5.0 --quads_collision_smooth_max_penalty=10.0 --quads_neighbor_encoder_type=attention --replay_buffer_sample_prob=0.75 --anneal_collision_steps=300000000 --train_for_env_steps=10000000000 --hidden_size=16 --neighbor_obs_type=none --quads_local_obs=-1 --quads_num_agents=1 --replay_buffer_sample_prob=0.0 --anneal_collision_steps=0 --save_milestones_sec=10000 --quads_neighbor_encoder_type=no_encoder --serial_mode=False --with_wandb=True --wandb_tags sf2 --seed=0 --experiment=00_baseline_see_0 --train_dir=./train_dir/quad_single_baseline/baseline_",
+ "cli_args": {
+ "algo": "APPO",
+ "env": "quadrotor_multi",
+ "experiment": "00_baseline_see_0",
+ "train_dir": "./train_dir/quad_single_baseline/baseline_",
+ "seed": 0,
+ "serial_mode": false,
+ "max_policy_lag": 100000000,
+ "num_workers": 36,
+ "num_envs_per_worker": 4,
+ "batch_size": 1024,
+ "rollout": 128,
+ "recurrence": 1,
+ "exploration_loss_coeff": 0.0,
+ "gae_lambda": 1.0,
+ "ppo_clip_value": 5.0,
+ "with_vtrace": false,
+ "max_grad_norm": 5.0,
+ "learning_rate": 0.0001,
+ "train_for_env_steps": 10000000000,
+ "save_milestones_sec": 10000,
+ "encoder_custom": "quad_multi_encoder",
+ "hidden_size": 16,
+ "nonlinearity": "tanh",
+ "policy_initialization": "xavier_uniform",
+ "actor_critic_share_weights": false,
+ "adaptive_stddev": false,
+ "use_rnn": false,
+ "with_wandb": true,
+ "wandb_tags": [
+ "sf2"
+ ],
+ "quads_episode_duration": 15.0,
+ "quads_num_agents": 1,
+ "quads_neighbor_hidden_size": 256,
+ "quads_neighbor_encoder_type": "no_encoder",
+ "quads_collision_reward": 5.0,
+ "quads_settle_reward": 0.0,
+ "quads_collision_hitbox_radius": 2.0,
+ "quads_collision_falloff_radius": 4.0,
+ "quads_collision_smooth_max_penalty": 10.0,
+ "neighbor_obs_type": "none",
+ "quads_use_numba": true,
+ "quads_local_obs": -1,
+ "quads_local_coeff": 1.0,
+ "quads_local_metric": "dist",
+ "quads_mode": "mix",
+ "quads_formation_size": 0.0,
+ "replay_buffer_sample_prob": 0.0,
+ "anneal_collision_steps": 0.0
+ },
+ "git_hash": "e6745398a0413737776f3cd1681cf77173b05921",
+ "git_repo_name": "https://github.com/andrewzhang505/sample-factory.git",
+ "wandb_unique_id": "00_baseline_see_0_20220712_191712_050475"
+ }
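cfg.json captures every hyperparameter Sample Factory resolved for this run, with `cli_args` holding only the values explicitly overridden on the command line. A quick way to inspect the settings most relevant for reproducing the setup (path assumed relative to the downloaded repo):

```python
import json

# Load the saved experiment configuration and print a few key hyperparameters.
with open("cfg.json") as f:
    cfg = json.load(f)

for key in ("algo", "env", "batch_size", "rollout", "learning_rate",
            "hidden_size", "use_rnn", "train_for_env_steps"):
    print(f"{key}: {cfg[key]}")

# cli_args holds only the explicit command-line overrides for this run.
print("explicit overrides:", len(cfg["cli_args"]))
```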
checkpoint_p0/best_000433823_444234752_reward_2.295.pth ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:f0d9261c1f8dc571f09623c6abaca4ecc2597bf356e220db73958dd153f16a72
+ size 45068
checkpoint_p0/checkpoint_000434271_444693504.pth ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:7e08a4fe9a8f92717dc17078f72ba970e48ed9227b34ace3d96e0c273f4fe857
+ size 45068
checkpoint_p0/checkpoint_000434837_445273088.pth ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:86d8e77863878e0a2aaab9b12c3ee4113160397bc5a0586e04d78190cabcb062
+ size 45068
checkpoint_p0/checkpoint_000435337_445785088.pth ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:36b83a0ab91f0bb3b86414f188dcd44358049faea08c1b9469f437221a1c81be
+ size 45068
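The checkpoint filenames appear to encode the training iteration, the environment-step count, and (for the `best_*` file) the reward at save time; the `.pth` files themselves are ordinary PyTorch serializations, also stored via LFS. A hedged first-look sketch, making no assumption about the keys inside (filename copied from above):

```python
import torch

# Load one checkpoint on CPU and list its top-level entries without assuming their names.
ckpt = torch.load("checkpoint_p0/best_000433823_444234752_reward_2.295.pth", map_location="cpu")
print(type(ckpt), list(ckpt.keys()) if isinstance(ckpt, dict) else "non-dict checkpoint")
```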
env_info_quadrotor_multi ADDED
Binary file (1.21 kB).
git.diff ADDED
@@ -0,0 +1,14 @@
+ diff --git a/sf_examples/swarm_rl_examples/runs/single_drone.py b/sf_examples/swarm_rl_examples/runs/single_drone.py
+ index 03de717..4652986 100644
+ --- a/sf_examples/swarm_rl_examples/runs/single_drone.py
+ +++ b/sf_examples/swarm_rl_examples/runs/single_drone.py
+ @@ -10,7 +10,8 @@ _params = ParamGrid(
+ SMALL_MODEL_CLI = QUAD_BASELINE_CLI + (
+ " --train_for_env_steps=10000000000 --hidden_size=16 --neighbor_obs_type=none --quads_local_obs=-1 "
+ "--quads_num_agents=1 --replay_buffer_sample_prob=0.0 --anneal_collision_steps=0 --save_milestones_sec=10000 "
+ - "--quads_neighbor_encoder_type=no_encoder --serial_mode=False --with_wandb=True --wandb_tags sf2"
+ + "--quads_neighbor_encoder_type=no_encoder --serial_mode=False --with_wandb=True --wandb_tags sf2 sync"
+ + "--async_rl=False --reward_clip=10.0"
+ )
+
+ _experiment = Experiment(
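git.diff records the uncommitted change in the uploader's fork at training time: `SMALL_MODEL_CLI` is built from adjacent string literals, which Python joins into one long CLI string with no separator added, so each fragment must end (or the next must start) with a space to keep flags apart. A small illustration of that mechanism with shortened, illustrative fragments:

```python
# Adjacent string literals concatenate at compile time with nothing inserted between them;
# the trailing space on the first fragment is what keeps the flags separated.
CLI = (
    " --train_for_env_steps=10000000000 --hidden_size=16 "
    "--serial_mode=False --with_wandb=True"
)
print(CLI)
# " --train_for_env_steps=10000000000 --hidden_size=16 --serial_mode=False --with_wandb=True"
```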
sf_log.txt ADDED
The diff for this file is too large to render.