justinpinkney committed on
Commit
99316e8
1 Parent(s): acd7e3e

Upload image-mixer-config.yaml with huggingface_hub

Browse files
Files changed (1) hide show
  1. image-mixer-config.yaml +134 -0
image-mixer-config.yaml ADDED
@@ -0,0 +1,134 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ model:
2
+ base_learning_rate: 0.5e-05
3
+ target: ldm.models.diffusion.ddpm.LatentDiffusion
4
+ params:
5
+ linear_start: 0.00085
6
+ linear_end: 0.0120
7
+ num_timesteps_cond: 1
8
+ log_every_t: 200
9
+ timesteps: 1000
10
+ first_stage_key: "jpg"
11
+ cond_stage_key: "jpg"
12
+ image_size: 80
13
+ channels: 4
14
+ cond_stage_trainable: false # Note: different from the one we trained before
15
+ conditioning_key: crossattn
16
+ monitor: val/loss_simple_ema
17
+ scale_factor: 0.18215
18
+
19
+ scheduler_config: # 10000 warmup steps
20
+ target: ldm.lr_scheduler.LambdaLinearScheduler
21
+ params:
22
+ warm_up_steps: [ 1000 ]
23
+ cycle_lengths: [ 10000000000000 ] # incredibly large number to prevent corner cases
24
+ f_start: [ 1.e-6 ]
25
+ f_max: [ 1. ]
26
+ f_min: [ 1. ]
27
+
28
+ unet_config:
29
+ target: ldm.modules.diffusionmodules.openaimodel.UNetModel
30
+ params:
31
+ image_size: 32 # unused
32
+ in_channels: 4
33
+ out_channels: 4
34
+ model_channels: 320
35
+ attention_resolutions: [ 4, 2, 1 ]
36
+ num_res_blocks: 2
37
+ channel_mult: [ 1, 2, 4, 4 ]
38
+ num_heads: 8
39
+ use_spatial_transformer: True
40
+ transformer_depth: 1
41
+ context_dim: 768
42
+ use_checkpoint: True
43
+ legacy: False
44
+
45
+ first_stage_config:
46
+ target: ldm.models.autoencoder.AutoencoderKL
47
+ params:
48
+ embed_dim: 4
49
+ monitor: val/rec_loss
50
+ ddconfig:
51
+ double_z: true
52
+ z_channels: 4
53
+ resolution: 256
54
+ in_channels: 3
55
+ out_ch: 3
56
+ ch: 128
57
+ ch_mult:
58
+ - 1
59
+ - 2
60
+ - 4
61
+ - 4
62
+ num_res_blocks: 2
63
+ attn_resolutions: []
64
+ dropout: 0.0
65
+ lossconfig:
66
+ target: torch.nn.Identity
67
+
68
+ cond_stage_config:
69
+ target: ldm.modules.encoders.modules.FrozenCLIPImageMutliEmbedder
70
+
71
+
72
+ data:
73
+ target: ldm.data.laion.WebDataModuleFromConfig
74
+ params:
75
+ tar_base: laion/improved_aesthetics_6plus/ims
76
+ batch_size: 1
77
+ num_workers: 2
78
+ multinode: True
79
+ min_size: 640
80
+ train:
81
+ shards: '{00000..01209}.tar'
82
+ shuffle: 10000
83
+ image_key: jpg
84
+ image_transforms:
85
+ - target: torchvision.transforms.Resize
86
+ params:
87
+ size: 640
88
+ interpolation: 3
89
+ - target: torchvision.transforms.RandomCrop
90
+ params:
91
+ size: 640
92
+
93
+ # NOTE use enough shards to avoid empty validation loops in workers
94
+ validation:
95
+ shards: '{00000..00008}.tar -'
96
+ shuffle: 0
97
+ image_key: jpg
98
+ image_transforms:
99
+ - target: torchvision.transforms.Resize
100
+ params:
101
+ size: 640
102
+ interpolation: 3
103
+ - target: torchvision.transforms.CenterCrop
104
+ params:
105
+ size: 640
106
+
107
+
108
+ lightning:
109
+ find_unused_parameters: false
110
+ modelcheckpoint:
111
+ params:
112
+ every_n_train_steps: 5000
113
+ callbacks:
114
+ image_logger:
115
+ target: main.ImageLogger
116
+ params:
117
+ batch_frequency: 5000
118
+ max_images: 8
119
+ increase_log_steps: False
120
+ log_first_step: True
121
+ log_images_kwargs:
122
+ use_ema_scope: False
123
+ inpaint: False
124
+ plot_progressive_rows: False
125
+ plot_diffusion_rows: False
126
+ N: 8
127
+ unconditional_guidance_scale: 3.0
128
+ unconditional_guidance_label: [""]
129
+
130
+ trainer:
131
+ benchmark: True
132
+ val_check_interval: 5000000 # really sorry
133
+ num_sanity_val_steps: 0
134
+ accumulate_grad_batches: 8