justinpinkney committed on
Commit
389784b
1 Parent(s): 99e6c4d

Upload configs with huggingface_hub

configs/2022-09-02T06-46-25-lightning.yaml ADDED
@@ -0,0 +1,28 @@
+lightning:
+  find_unused_parameters: false
+  modelcheckpoint:
+    params:
+      every_n_train_steps: 5000
+  callbacks:
+    image_logger:
+      target: main.ImageLogger
+      params:
+        batch_frequency: 500
+        max_images: 4
+        increase_log_steps: false
+        log_first_step: false
+        log_images_kwargs:
+          use_ema_scope: false
+          inpaint: false
+          plot_progressive_rows: false
+          plot_diffusion_rows: false
+          'N': 4
+          unconditional_guidance_scale: 3.0
+          unconditional_guidance_label:
+          - ''
+  trainer:
+    benchmark: true
+    num_sanity_val_steps: 0
+    accumulate_grad_batches: 1
+    accelerator: ddp
+    gpus: 4,5
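
A minimal sketch (not part of this commit) of how a CompVis latent-diffusion style main.py consumes the lightning: section above, assuming a pytorch_lightning 1.x Trainer and the repo's ldm.util.instantiate_from_config helper; the file name matches this commit, everything else is illustrative.

# Sketch only: keys under trainer: become Trainer kwargs; the image_logger
# callback is built from its target/params pair.
from omegaconf import OmegaConf
import pytorch_lightning as pl
from ldm.util import instantiate_from_config

cfg = OmegaConf.load("configs/2022-09-02T06-46-25-lightning.yaml")

# main.ImageLogger is instantiated from target/params (assumes main.py is importable)
image_logger = instantiate_from_config(cfg.lightning.callbacks.image_logger)

trainer = pl.Trainer(
    callbacks=[image_logger],
    **cfg.lightning.trainer,  # benchmark, num_sanity_val_steps, accelerator=ddp, gpus="4,5", ...
)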
configs/2022-09-02T06-46-25-project.yaml ADDED
@@ -0,0 +1,109 @@
+model:
+  base_learning_rate: 0.0001
+  target: ldm.models.diffusion.ddpm.LatentDiffusion
+  params:
+    linear_start: 0.00085
+    linear_end: 0.012
+    num_timesteps_cond: 1
+    log_every_t: 200
+    timesteps: 1000
+    first_stage_key: jpg
+    cond_stage_key: txt
+    image_size: 64
+    channels: 4
+    cond_stage_trainable: false
+    conditioning_key: crossattn
+    monitor: val/loss_simple_ema
+    scale_factor: 0.18215
+    scheduler_config:
+      target: ldm.lr_scheduler.LambdaLinearScheduler
+      params:
+        warm_up_steps:
+        - 1
+        cycle_lengths:
+        - 10000000000000
+        f_start:
+        - 1.0e-06
+        f_max:
+        - 1.0
+        f_min:
+        - 1.0
+    unet_config:
+      target: ldm.modules.diffusionmodules.openaimodel.UNetModel
+      params:
+        image_size: 32
+        in_channels: 4
+        out_channels: 4
+        model_channels: 320
+        attention_resolutions:
+        - 4
+        - 2
+        - 1
+        num_res_blocks: 2
+        channel_mult:
+        - 1
+        - 2
+        - 4
+        - 4
+        num_heads: 8
+        use_spatial_transformer: true
+        transformer_depth: 1
+        context_dim: 768
+        use_checkpoint: true
+        legacy: false
+    first_stage_config:
+      target: ldm.models.autoencoder.AutoencoderKL
+      ckpt_path: models/first_stage_models/kl-f8/model.ckpt
+      params:
+        embed_dim: 4
+        monitor: val/rec_loss
+        ddconfig:
+          double_z: true
+          z_channels: 4
+          resolution: 256
+          in_channels: 3
+          out_ch: 3
+          ch: 128
+          ch_mult:
+          - 1
+          - 2
+          - 4
+          - 4
+          num_res_blocks: 2
+          attn_resolutions: []
+          dropout: 0.0
+        lossconfig:
+          target: torch.nn.Identity
+    cond_stage_config:
+      target: ldm.modules.encoders.modules.FrozenCLIPEmbedder
+data:
+  target: main.DataModuleFromConfig
+  params:
+    batch_size: 4
+    num_workers: 4
+    train:
+      target: ldm.data.simple.FolderData
+      params:
+        root_dir: datasets/pokemon/img
+        caption_file: datasets/pokemon/captions.json
+        image_transforms:
+        - target: torchvision.transforms.Resize
+          params:
+            size: 512
+            interpolation: 3
+        - target: torchvision.transforms.RandomCrop
+          params:
+            size: 512
+    validation:
+      target: ldm.data.simple.FolderData
+      params:
+        root_dir: datasets/pokemon/img
+        caption_file: datasets/pokemon/captions.json
+        image_transforms:
+        - target: torchvision.transforms.Resize
+          params:
+            size: 512
+            interpolation: 3
+        - target: torchvision.transforms.RandomCrop
+          params:
+            size: 512
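
A minimal sketch (not part of this commit) of how the project config above is typically turned into objects, assuming the CompVis latent-diffusion convention that every target:/params: pair is built via ldm.util.instantiate_from_config; the learning-rate assignment is simplified for illustration.

# Sketch only: build the LatentDiffusion model and the FolderData data module
# from the config file added in this commit.
from omegaconf import OmegaConf
from ldm.util import instantiate_from_config

config = OmegaConf.load("configs/2022-09-02T06-46-25-project.yaml")

model = instantiate_from_config(config.model)      # UNet + KL-f8 autoencoder + frozen CLIP text encoder
model.learning_rate = config.model.base_learning_rate  # main.py additionally scales this by batch size/GPUs

data = instantiate_from_config(config.data)        # DataModuleFromConfig over datasets/pokemon
data.prepare_data()
data.setup()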