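# Aligned shape-as-latent ("ShapeVAE-256") training config: a Perceiver-based shape VAE
# whose 256 latent tokens are aligned with CLIP image/text embeddings.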
model:
  target: michelangelo.models.tsal.asl_pl_module.AlignedShapeAsLatentPLModule
  params:
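    # Shape encoder/decoder: Perceiver-style shape VAE (256 latents, 64-dim latent embedding).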
    shape_module_cfg:
      target: michelangelo.models.tsal.sal_perceiver.AlignedShapeLatentPerceiver
      params:
        num_latents: 256
        embed_dim: 64
        point_feats: 3   # extra per-point input features: the surface normal (3 channels)
        num_freqs: 8
        include_pi: false
        heads: 12
        width: 768
        num_encoder_layers: 8
        num_decoder_layers: 16
        use_ln_post: true
        init_scale: 0.25
        qkv_bias: false
        use_checkpoint: true
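
    # CLIP alignment module; clip_model_version points to a local copy of CLIP ViT-L/14.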
    aligned_module_cfg:
      target: michelangelo.models.tsal.clip_asl_module.CLIPAlignedShapeAsLatentModule
      params:
        clip_model_version: "./checkpoints/clip/clip-vit-large-patch14"

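    # Loss: contrastive shape-image-text alignment + near(-surface) reconstruction + KL regularization.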
    loss_cfg:
      target: michelangelo.models.tsal.loss.ContrastKLNearFar
      params:
        contrast_weight: 0.1
        near_weight: 0.1
        kl_weight: 0.001

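    # Optimization: AdamW (no base lr given here); the scheduler below scales the lr by a factor.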
    optimizer_cfg:
      optimizer:
        target: torch.optim.AdamW
        params:
          betas: [0.9, 0.99]
          eps: 1.e-6
          weight_decay: 1.e-2

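      # Warm up the lr factor from f_start to f_max over warm_up_steps, then cosine-anneal toward f_min.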
      scheduler:
        target: michelangelo.utils.trainings.lr_scheduler.LambdaWarmUpCosineFactorScheduler
        params:
          warm_up_steps: 5000
          f_start: 1.e-6
          f_min: 1.e-3
          f_max: 1.0
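
# Usage sketch (assumption: the repo instantiates "target"/"params" configs generically via an
# instantiate-from-config helper; the file path and the snippet below are illustrative only):
#
#   from importlib import import_module
#   from omegaconf import OmegaConf
#
#   cfg = OmegaConf.load("shapevae-256.yaml")             # this config file
#   module_path, cls_name = cfg.model.target.rsplit(".", 1)
#   model_cls = getattr(import_module(module_path), cls_name)
#   model = model_cls(**cfg.model.params)                  # AlignedShapeAsLatentPLModule(...)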