leexskank committed on
Commit 25d7397
Parent(s): 5be2595

Upload dcaidcepoch03step50600pruned.yaml

Files changed (1):
  1. dcaidcepoch03step50600pruned.yaml +108 -0
dcaidcepoch03step50600pruned.yaml ADDED
@@ -0,0 +1,108 @@
model:
  base_learning_rate: 1.0e-6
  target: ldm.models.diffusion.ddpm.LatentDiffusion
  params:
    linear_start: 0.00085
    linear_end: 0.0120
    num_timesteps_cond: 1
    log_every_t: 200
    timesteps: 1000
    first_stage_key: image
    cond_stage_key: caption
    image_size: 64
    channels: 4
    cond_stage_trainable: true
    conditioning_key: crossattn
    monitor: val/loss_simple_ema
    scale_factor: 0.18215
    use_ema: False
    unfreeze_model: True
    model_lr: 1.0e-6

    unet_config:
      target: ldm.modules.diffusionmodules.openaimodel.UNetModel
      params:
        image_size: 32 # unused
        in_channels: 4
        out_channels: 4
        model_channels: 320
        attention_resolutions: [ 4, 2, 1 ]
        num_res_blocks: 2
        channel_mult: [ 1, 2, 4, 4 ]
        num_heads: 8
        use_spatial_transformer: True
        transformer_depth: 1
        context_dim: 768
        use_checkpoint: True
        legacy: False

    first_stage_config:
      target: ldm.models.autoencoder.AutoencoderKL
      params:
        embed_dim: 4
        monitor: val/rec_loss
        ddconfig:
          double_z: true
          z_channels: 4
          resolution: 512
          in_channels: 3
          out_ch: 3
          ch: 128
          ch_mult:
          - 1
          - 2
          - 4
          - 4
          num_res_blocks: 2
          attn_resolutions: []
          dropout: 0.0
        lossconfig:
          target: torch.nn.Identity

    cond_stage_config:
      target: ldm.modules.encoders.modules.FrozenCLIPEmbedder

data:
  target: main.DataModuleFromConfig
  params:
    batch_size: 40 # prefer the highest value that avoids a CUDA out-of-memory error; A100 40GB ≈ ~20, 80GB ≈ ~48
    num_workers: 8
    wrap: false
    train:
      target: ldm.data.every_dream.EveryDreamBatch
      params:
        repeats: 5 # rough suggestions: 5 with 5000+ images, 15 for 1000 images; use the micro yaml for <100
        debug_level: 1 # set to 1 to print when images are dropped by multi-aspect-ratio batching
        conditional_dropout: 0.04 # experimental; probability of dropping the caption, may help with poorly captioned images
        resolution: 512 # use 512 for 24GB; 576, 640, 704, or 768 only on higher-VRAM cards
    validation:
      target: ldm.data.ed_validate.EDValidateBatch
      params:
        repeats: 0.01
    test:
      target: ldm.data.ed_validate.EDValidateBatch
      params:
        repeats: 0.2

lightning:
  modelcheckpoint:
    params:
      every_n_epochs: 1 # produce a ckpt every epoch; leave at 1
      #every_n_train_steps: 1400 # can only checkpoint on epochs or on train steps, not both
      save_top_k: 6 # keep the best N ckpts by loss; can be reduced to save disk space, but keep at least 2, more if max_epochs below is higher
      save_last: True
      filename: "{epoch:02d}-{step:05d}"
  callbacks:
    image_logger:
      target: main.ImageLogger
      params:
        batch_frequency: 200
        max_images: 16
        increase_log_steps: False

  trainer:
    benchmark: True
    max_epochs: 6 # better to run several epochs and test the per-epoch checkpoints; try 4-5
    max_steps: 99000 # better to end on epochs rather than steps, especially with >500 images, to keep the distribution even; set this only if you really want to
    check_val_every_n_epoch: 1
    gpus: 0,
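
For context on how a file like this is consumed, here is a minimal sketch of loading the config and instantiating the model, assuming the standard latent-diffusion/EveryDream code layout (OmegaConf plus ldm.util.instantiate_from_config) and that the YAML above is saved locally as dcaidcepoch03step50600pruned.yaml; the learning-rate assignment is a simplified stand-in for what the training entry point does.

# Minimal sketch (assumes the EveryDream / latent-diffusion repo is on PYTHONPATH
# and this config is saved as dcaidcepoch03step50600pruned.yaml).
from omegaconf import OmegaConf
from ldm.util import instantiate_from_config

config = OmegaConf.load("dcaidcepoch03step50600pruned.yaml")

# Builds ldm.models.diffusion.ddpm.LatentDiffusion from model.params, which in
# turn instantiates unet_config, first_stage_config and cond_stage_config
# (the FrozenCLIPEmbedder fetches CLIP weights on first use).
model = instantiate_from_config(config.model)

# base_learning_rate is read by the training script, not by the model class;
# this is a simplified version of that step (no LR scaling by batch size/GPUs).
model.learning_rate = config.model.base_learning_rate

print(type(model).__name__, sum(p.numel() for p in model.parameters()), "parameters")

In a CompVis latent-diffusion-style setup the same file is typically passed to the training script on the command line (for example, main.py with --base pointing at this YAML, -t for training, and --gpus 0,), which wires the data and lightning sections into the PyTorch Lightning Trainer.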