maximellerbach committed on
Commit
8285b14
·
verified ·
1 Parent(s): e35289b

Upload policy weights, train config and readme

Browse files
Files changed (4) hide show
  1. README.md +1 -1
  2. config.json +2 -11
  3. model.safetensors +2 -2
  4. train_config.json +5 -14
README.md CHANGED
@@ -5,9 +5,9 @@ license: apache-2.0
5
  model_name: wam
6
  pipeline_tag: robotics
7
  tags:
8
- - wam
9
  - lerobot
10
  - robotics
 
11
  ---
12
 
13
  # Model Card for wam
 
5
  model_name: wam
6
  pipeline_tag: robotics
7
  tags:
 
8
  - lerobot
9
  - robotics
10
+ - wam
11
  ---
12
 
13
  # Model Card for wam
config.json CHANGED
@@ -33,7 +33,7 @@
33
  "private": null,
34
  "tags": null,
35
  "license": null,
36
- "pretrained_path": null,
37
  "time_between_frames": 50,
38
  "lag_offset": 3,
39
  "image_size": [
@@ -55,7 +55,7 @@
55
  "pretrained_backbone_weights": "ResNet18_Weights.IMAGENET1K_V1",
56
  "use_pretrained_backbone": true,
57
  "freeze_backbone": false,
58
- "latent_dim": 32,
59
  "n_vae_encoder_layers": 4,
60
  "action_head_num_layers": 4,
61
  "action_head_num_heads": 8,
@@ -64,15 +64,6 @@
64
  "predictor_num_heads": 8,
65
  "predictor_dropout": 0.1,
66
  "predictor_max_tokens": 1024,
67
- "decoder_attention_blur_kernel": 7,
68
- "decoder_use_attention_head": true,
69
- "decoder_attention_gain_init": 1.0,
70
- "decoder_attention_supervision_weight": 0.2,
71
- "decoder_attention_mass_weight": 0.05,
72
- "decoder_attention_pos_weight": 4.0,
73
- "decoder_attention_min_mass": 0.03,
74
- "decoder_attention_floor_weight": 0.1,
75
- "decoder_attention_floor": 0.02,
76
  "optimizer_lr": 5e-05,
77
  "optimizer_weight_decay": 0.01,
78
  "optimizer_grad_clip_norm": 1.0,
 
33
  "private": null,
34
  "tags": null,
35
  "license": null,
36
+ "pretrained_path": "outputs/train/wam_aloha_decoder/checkpoints/last/pretrained_model",
37
  "time_between_frames": 50,
38
  "lag_offset": 3,
39
  "image_size": [
 
55
  "pretrained_backbone_weights": "ResNet18_Weights.IMAGENET1K_V1",
56
  "use_pretrained_backbone": true,
57
  "freeze_backbone": false,
58
+ "latent_dim": 64,
59
  "n_vae_encoder_layers": 4,
60
  "action_head_num_layers": 4,
61
  "action_head_num_heads": 8,
 
64
  "predictor_num_heads": 8,
65
  "predictor_dropout": 0.1,
66
  "predictor_max_tokens": 1024,
 
 
 
 
 
 
 
 
 
67
  "optimizer_lr": 5e-05,
68
  "optimizer_weight_decay": 0.01,
69
  "optimizer_grad_clip_norm": 1.0,
model.safetensors CHANGED
@@ -1,3 +1,3 @@
1
  version https://git-lfs.github.com/spec/v1
2
- oid sha256:dad9c6fb489c6c30e282aefb65e674645856c4264e5e9a1b24c527b7faf1bb85
3
- size 301229464
 
1
  version https://git-lfs.github.com/spec/v1
2
+ oid sha256:f54c122145030f5be63ef711d1f22fef5de14381c3c78f3f2ab31719c99828cc
3
+ size 301331136
train_config.json CHANGED
@@ -154,7 +154,7 @@
154
  "private": null,
155
  "tags": null,
156
  "license": null,
157
- "pretrained_path": null,
158
  "time_between_frames": 50,
159
  "lag_offset": 3,
160
  "image_size": [
@@ -176,7 +176,7 @@
176
  "pretrained_backbone_weights": "ResNet18_Weights.IMAGENET1K_V1",
177
  "use_pretrained_backbone": true,
178
  "freeze_backbone": false,
179
- "latent_dim": 32,
180
  "n_vae_encoder_layers": 4,
181
  "action_head_num_layers": 4,
182
  "action_head_num_heads": 8,
@@ -185,15 +185,6 @@
185
  "predictor_num_heads": 8,
186
  "predictor_dropout": 0.1,
187
  "predictor_max_tokens": 1024,
188
- "decoder_attention_blur_kernel": 7,
189
- "decoder_use_attention_head": true,
190
- "decoder_attention_gain_init": 1.0,
191
- "decoder_attention_supervision_weight": 0.2,
192
- "decoder_attention_mass_weight": 0.05,
193
- "decoder_attention_pos_weight": 4.0,
194
- "decoder_attention_min_mass": 0.03,
195
- "decoder_attention_floor_weight": 0.1,
196
- "decoder_attention_floor": 0.02,
197
  "optimizer_lr": 5e-05,
198
  "optimizer_weight_decay": 0.01,
199
  "optimizer_grad_clip_norm": 1.0,
@@ -203,7 +194,7 @@
203
  },
204
  "output_dir": "outputs/train/wam_aloha_decoder",
205
  "job_name": "aloha_wam",
206
- "resume": false,
207
  "seed": 1000,
208
  "cudnn_deterministic": false,
209
  "num_workers": 4,
@@ -244,7 +235,7 @@
244
  "project": "lerobot",
245
  "entity": null,
246
  "notes": null,
247
- "run_id": "lst682vd",
248
  "mode": null,
249
  "add_tags": true
250
  },
@@ -255,5 +246,5 @@
255
  "rabc_epsilon": 1e-06,
256
  "rabc_head_mode": "sparse",
257
  "rename_map": {},
258
- "checkpoint_path": null
259
  }
 
154
  "private": null,
155
  "tags": null,
156
  "license": null,
157
+ "pretrained_path": "outputs/train/wam_aloha_decoder/checkpoints/last/pretrained_model",
158
  "time_between_frames": 50,
159
  "lag_offset": 3,
160
  "image_size": [
 
176
  "pretrained_backbone_weights": "ResNet18_Weights.IMAGENET1K_V1",
177
  "use_pretrained_backbone": true,
178
  "freeze_backbone": false,
179
+ "latent_dim": 64,
180
  "n_vae_encoder_layers": 4,
181
  "action_head_num_layers": 4,
182
  "action_head_num_heads": 8,
 
185
  "predictor_num_heads": 8,
186
  "predictor_dropout": 0.1,
187
  "predictor_max_tokens": 1024,
 
 
 
 
 
 
 
 
 
188
  "optimizer_lr": 5e-05,
189
  "optimizer_weight_decay": 0.01,
190
  "optimizer_grad_clip_norm": 1.0,
 
194
  },
195
  "output_dir": "outputs/train/wam_aloha_decoder",
196
  "job_name": "aloha_wam",
197
+ "resume": true,
198
  "seed": 1000,
199
  "cudnn_deterministic": false,
200
  "num_workers": 4,
 
235
  "project": "lerobot",
236
  "entity": null,
237
  "notes": null,
238
+ "run_id": "5xztxc64",
239
  "mode": null,
240
  "add_tags": true
241
  },
 
246
  "rabc_epsilon": 1e-06,
247
  "rabc_head_mode": "sparse",
248
  "rename_map": {},
249
+ "checkpoint_path": "outputs/train/wam_aloha_decoder/checkpoints/last"
250
  }