{
  "chunk_size": 100,
  "dim_feedforward": 3200,
  "dim_model": 512,
  "dropout": 0.1,
  "feedforward_activation": "relu",
  "input_normalization_modes": {
    "observation.images.cam_high": "mean_std",
    "observation.images.cam_left_wrist": "mean_std",
    "observation.images.cam_low": "mean_std",
    "observation.images.cam_right_wrist": "mean_std",
    "observation.state": "mean_std"
  },
  "input_shapes": {
    "observation.images.cam_high": [
      3,
      480,
      640
    ],
    "observation.images.cam_left_wrist": [
      3,
      480,
      640
    ],
    "observation.images.cam_low": [
      3,
      480,
      640
    ],
    "observation.images.cam_right_wrist": [
      3,
      480,
      640
    ],
    "observation.state": [
      14
    ]
  },
  "kl_weight": 10.0,
  "latent_dim": 32,
  "n_action_steps": 100,
  "n_decoder_layers": 1,
  "n_encoder_layers": 4,
  "n_heads": 8,
  "n_obs_steps": 1,
  "n_vae_encoder_layers": 4,
  "output_normalization_modes": {
    "action": "mean_std"
  },
  "output_shapes": {
    "action": [
      14
    ]
  },
  "pre_norm": false,
  "pretrained_backbone_weights": "ResNet18_Weights.IMAGENET1K_V1",
  "replace_final_stride_with_dilation": false,
  "temporal_ensemble_momentum": null,
  "use_vae": true,
  "vision_backbone": "resnet18"
}
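
For context, this looks like an ACT (Action Chunking with Transformers) policy configuration in LeRobot's format: four 3x480x640 RGB cameras plus a 14-dimensional proprioceptive state map to a 14-dimensional action (consistent with an ALOHA-style bimanual setup), predicted in chunks of 100 steps by a VAE-regularized transformer (kl_weight 10.0, latent_dim 32) over a ResNet-18 vision backbone. Below is a minimal sketch, using only the standard library, of loading and sanity-checking the file; the filename `config.json` is an assumption.

```python
import json

# Hypothetical path: load this ACT policy config for inspection.
with open("config.json") as f:
    cfg = json.load(f)

# The policy predicts action chunks of `chunk_size` steps; here all 100
# predicted steps are executed, since n_action_steps == chunk_size.
assert cfg["n_action_steps"] <= cfg["chunk_size"]

# Inputs: four 3x480x640 RGB cameras plus a 14-dim state vector.
for name, shape in cfg["input_shapes"].items():
    print(f"input  {name}: {shape}")

# Output: a 14-dim action vector, normalized with mean/std statistics.
for name, shape in cfg["output_shapes"].items():
    print(f"output {name}: {shape}")
```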