pxovela commited on
Commit
85a3c8b
1 Parent(s): a7340ae

Upload 7 files

Browse files

ED2 parameters used for the unet/TE overtraining experiment by https://followfoxai.substack.com/

Training Settings/Text Encoder/1_burnt_text_encoder_cfg.json ADDED
@@ -0,0 +1,50 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ {
2
+ "config": "train.json",
3
+ "amp": false,
4
+ "batch_size": 4,
5
+ "ckpt_every_n_minutes": null,
6
+ "clip_grad_norm": null,
7
+ "clip_skip": 0,
8
+ "cond_dropout": 0.04,
9
+ "data_root": "D:\\ED2\\EveryDream2trainer\\logs\\burnt_base_20230508-210229\\ckpts\\last-burnt_base-ep49-gs00350",
10
+ "disable_textenc_training": false,
11
+ "disable_xformers": false,
12
+ "flip_p": 0.0,
13
+ "gpuid": 0,
14
+ "gradient_checkpointing": true,
15
+ "grad_accum": 1,
16
+ "logdir": "logs",
17
+ "log_step": 1,
18
+ "lowvram": false,
19
+ "lr": 1.5e-06,
20
+ "lr_decay_steps": 0,
21
+ "lr_scheduler": "constant",
22
+ "lr_warmup_steps": 0,
23
+ "max_epochs": 500,
24
+ "notebook": false,
25
+ "optimizer_config": "optimizer.json",
26
+ "project_name": "burnt_text_encoder",
27
+ "resolution": 512,
28
+ "resume_ckpt": "sd_v1-5_vae",
29
+ "run_name": null,
30
+ "sample_prompts": "sample_prompts.txt",
31
+ "sample_steps": 5000000,
32
+ "save_ckpt_dir": null,
33
+ "save_ckpts_from_n_epochs": 0,
34
+ "save_every_n_epochs": 25,
35
+ "save_optimizer": false,
36
+ "scale_lr": false,
37
+ "seed": 555,
38
+ "shuffle_tags": false,
39
+ "validation_config": "validation_default.json",
40
+ "wandb": false,
41
+ "write_schedule": false,
42
+ "rated_dataset": false,
43
+ "rated_dataset_target_dropout_percent": 50,
44
+ "zero_frequency_noise_ratio": 0.0,
45
+ "save_full_precision": false,
46
+ "disable_unet_training": true,
47
+ "rated_dataset_target_dropout_rate": 50,
48
+ "disable_amp": false,
49
+ "useadam8bit": false
50
+ }
Training Settings/Text Encoder/2_burnt_TE_extra_cfg.json ADDED
@@ -0,0 +1,50 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ {
2
+ "config": "train.json",
3
+ "amp": false,
4
+ "batch_size": 4,
5
+ "ckpt_every_n_minutes": null,
6
+ "clip_grad_norm": null,
7
+ "clip_skip": 0,
8
+ "cond_dropout": 0.04,
9
+ "data_root": "D:\\ED2\\EveryDream2trainer\\input\\merab_6_longer_adjusted_captions",
10
+ "disable_textenc_training": false,
11
+ "disable_xformers": false,
12
+ "flip_p": 0.0,
13
+ "gpuid": 0,
14
+ "gradient_checkpointing": true,
15
+ "grad_accum": 1,
16
+ "logdir": "logs",
17
+ "log_step": 1,
18
+ "lowvram": false,
19
+ "lr": 9e-06,
20
+ "lr_decay_steps": 0,
21
+ "lr_scheduler": "constant",
22
+ "lr_warmup_steps": 0,
23
+ "max_epochs": 500,
24
+ "notebook": false,
25
+ "optimizer_config": "optimizer.json",
26
+ "project_name": "burnt_TE_extra",
27
+ "resolution": 512,
28
+ "resume_ckpt": "logs\\burnt_text_encoder_20230508-213407\\ckpts\\last-burnt_text_encoder-ep499-gs03500",
29
+ "run_name": null,
30
+ "sample_prompts": "sample_prompts.txt",
31
+ "sample_steps": 5000000,
32
+ "save_ckpt_dir": null,
33
+ "save_ckpts_from_n_epochs": 0,
34
+ "save_every_n_epochs": 100,
35
+ "save_optimizer": false,
36
+ "scale_lr": false,
37
+ "seed": 555,
38
+ "shuffle_tags": false,
39
+ "validation_config": "validation_default.json",
40
+ "wandb": false,
41
+ "write_schedule": false,
42
+ "rated_dataset": false,
43
+ "rated_dataset_target_dropout_percent": 50,
44
+ "zero_frequency_noise_ratio": 0.0,
45
+ "save_full_precision": false,
46
+ "disable_unet_training": true,
47
+ "rated_dataset_target_dropout_rate": 50,
48
+ "disable_amp": false,
49
+ "useadam8bit": false
50
+ }
Training Settings/Unet/1_burnt_unet_cfg.json ADDED
@@ -0,0 +1,50 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ {
2
+ "config": "train.json",
3
+ "amp": false,
4
+ "batch_size": 4,
5
+ "ckpt_every_n_minutes": null,
6
+ "clip_grad_norm": null,
7
+ "clip_skip": 0,
8
+ "cond_dropout": 0.04,
9
+ "data_root": "D:\\ED2\\EveryDream2trainer\\input\\merab_6_longer_adjusted_captions",
10
+ "disable_textenc_training": true,
11
+ "disable_xformers": false,
12
+ "flip_p": 0.0,
13
+ "gpuid": 0,
14
+ "gradient_checkpointing": true,
15
+ "grad_accum": 1,
16
+ "logdir": "logs",
17
+ "log_step": 1,
18
+ "lowvram": false,
19
+ "lr": 1.5e-06,
20
+ "lr_decay_steps": 0,
21
+ "lr_scheduler": "constant",
22
+ "lr_warmup_steps": 0,
23
+ "max_epochs": 500,
24
+ "notebook": false,
25
+ "optimizer_config": "optimizer.json",
26
+ "project_name": "burnt_unet",
27
+ "resolution": 512,
28
+ "resume_ckpt": "logs\\burnt_base_20230508-210229\\ckpts\\last-burnt_base-ep49-gs00350",
29
+ "run_name": null,
30
+ "sample_prompts": "sample_prompts.txt",
31
+ "sample_steps": 5000000,
32
+ "save_ckpt_dir": null,
33
+ "save_ckpts_from_n_epochs": 0,
34
+ "save_every_n_epochs": 25,
35
+ "save_optimizer": false,
36
+ "scale_lr": false,
37
+ "seed": 555,
38
+ "shuffle_tags": false,
39
+ "validation_config": "validation_default.json",
40
+ "wandb": false,
41
+ "write_schedule": false,
42
+ "rated_dataset": false,
43
+ "rated_dataset_target_dropout_percent": 50,
44
+ "zero_frequency_noise_ratio": 0.0,
45
+ "save_full_precision": false,
46
+ "disable_unet_training": false,
47
+ "rated_dataset_target_dropout_rate": 50,
48
+ "disable_amp": false,
49
+ "useadam8bit": false
50
+ }
Training Settings/Unet/2_burnt_UNET_extra_cfg.json ADDED
@@ -0,0 +1,50 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ {
2
+ "config": "train.json",
3
+ "amp": false,
4
+ "batch_size": 4,
5
+ "ckpt_every_n_minutes": null,
6
+ "clip_grad_norm": null,
7
+ "clip_skip": 0,
8
+ "cond_dropout": 0.04,
9
+ "data_root": "D:\\ED2\\EveryDream2trainer\\input\\merab_6_longer_adjusted_captions",
10
+ "disable_textenc_training": true,
11
+ "disable_xformers": false,
12
+ "flip_p": 0.0,
13
+ "gpuid": 0,
14
+ "gradient_checkpointing": true,
15
+ "grad_accum": 1,
16
+ "logdir": "logs",
17
+ "log_step": 1,
18
+ "lowvram": false,
19
+ "lr": 9e-06,
20
+ "lr_decay_steps": 0,
21
+ "lr_scheduler": "constant",
22
+ "lr_warmup_steps": 0,
23
+ "max_epochs": 500,
24
+ "notebook": false,
25
+ "optimizer_config": "optimizer.json",
26
+ "project_name": "burnt_UNET_extra",
27
+ "resolution": 512,
28
+ "resume_ckpt": "logs\\burnt_unet_20230509-071333\\ckpts\\last-burnt_unet-ep499-gs03500",
29
+ "run_name": null,
30
+ "sample_prompts": "sample_prompts.txt",
31
+ "sample_steps": 5000000,
32
+ "save_ckpt_dir": null,
33
+ "save_ckpts_from_n_epochs": 0,
34
+ "save_every_n_epochs": 100,
35
+ "save_optimizer": false,
36
+ "scale_lr": false,
37
+ "seed": 555,
38
+ "shuffle_tags": false,
39
+ "validation_config": "validation_default.json",
40
+ "wandb": false,
41
+ "write_schedule": false,
42
+ "rated_dataset": false,
43
+ "rated_dataset_target_dropout_percent": 50,
44
+ "zero_frequency_noise_ratio": 0.0,
45
+ "save_full_precision": false,
46
+ "disable_unet_training": false,
47
+ "rated_dataset_target_dropout_rate": 50,
48
+ "disable_amp": false,
49
+ "useadam8bit": false
50
+ }
Training Settings/burnt_base-20230508-210229_cfg.json ADDED
@@ -0,0 +1,50 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ {
2
+ "config": "train.json",
3
+ "amp": false,
4
+ "batch_size": 4,
5
+ "ckpt_every_n_minutes": null,
6
+ "clip_grad_norm": null,
7
+ "clip_skip": 0,
8
+ "cond_dropout": 0.04,
9
+ "data_root": "D:\\ED2\\EveryDream2trainer\\input\\merab_6_longer_adjusted_captions",
10
+ "disable_textenc_training": false,
11
+ "disable_xformers": false,
12
+ "flip_p": 0.0,
13
+ "gpuid": 0,
14
+ "gradient_checkpointing": true,
15
+ "grad_accum": 1,
16
+ "logdir": "logs",
17
+ "log_step": 1,
18
+ "lowvram": false,
19
+ "lr": 1.5e-06,
20
+ "lr_decay_steps": 0,
21
+ "lr_scheduler": "constant",
22
+ "lr_warmup_steps": 0,
23
+ "max_epochs": 50,
24
+ "notebook": false,
25
+ "optimizer_config": "optimizer.json",
26
+ "project_name": "burnt_base",
27
+ "resolution": 512,
28
+ "resume_ckpt": "sd_v1-5_vae",
29
+ "run_name": null,
30
+ "sample_prompts": "sample_prompts.txt",
31
+ "sample_steps": 5000000,
32
+ "save_ckpt_dir": null,
33
+ "save_ckpts_from_n_epochs": 0,
34
+ "save_every_n_epochs": 50,
35
+ "save_optimizer": false,
36
+ "scale_lr": false,
37
+ "seed": 555,
38
+ "shuffle_tags": false,
39
+ "validation_config": "validation_default.json",
40
+ "wandb": false,
41
+ "write_schedule": false,
42
+ "rated_dataset": false,
43
+ "rated_dataset_target_dropout_percent": 50,
44
+ "zero_frequency_noise_ratio": 0.0,
45
+ "save_full_precision": false,
46
+ "disable_unet_training": false,
47
+ "rated_dataset_target_dropout_rate": 50,
48
+ "disable_amp": false,
49
+ "useadam8bit": false
50
+ }
Training Settings/optimizer.json ADDED
@@ -0,0 +1,17 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ {
2
+ "doc": {
3
+ "optimizer": "adamw, adamw8bit, lion",
4
+ "optimizer_desc": "'adamw' in standard 32bit, 'adamw8bit' is bitsandbytes, 'lion' is lucidrains",
5
+ "lr": "learning rate, if null will use CLI or main JSON config value",
6
+ "betas": "exponential decay rates for the moment estimates",
7
+ "epsilon": "value added to denominator for numerical stability, unused for lion",
8
+ "weight_decay": "weight decay (L2 penalty)",
9
+ "text_encoder_lr_scale": "scale the text encoder LR relative to the Unet LR. for example, if `lr` is 2e-6 and `text_encoder_lr_scale` is 0.5, the text encoder's LR will be set to `1e-6`."
10
+ },
11
+ "optimizer": "adamw8bit",
12
+ "lr": 1e-6,
13
+ "betas": [0.9, 0.999],
14
+ "epsilon": 1e-8,
15
+ "weight_decay": 0.010,
16
+ "text_encoder_lr_scale": 1.0
17
+ }
Training Settings/validation_default.json ADDED
@@ -0,0 +1,20 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ {
2
+ "documentation": {
3
+ "validate_training": "If true, validate the training using a separate set of image/caption pairs, and log the results as `loss/val`. The curve will trend downwards as the model trains, then flatten and start to trend upwards as effective training finishes and the model begins to overfit the training data. Very useful for preventing overfitting, for checking if your learning rate is too low or too high, and for deciding when to stop training.",
4
+ "val_split_mode": "Either 'automatic' or 'manual', ignored if validate_training is false. 'automatic' val_split_mode picks a random subset of the training set (the number of items is controlled by val_split_proportion) and removes them from training to use as a validation set. 'manual' val_split_mode lets you provide your own folder of validation items (images+captions), specified using 'val_data_root'.",
5
+ "val_split_proportion": "For 'automatic' val_split_mode, how much of the train dataset should be removed for use as validation. Typical values are 0.15-0.2 (15-20% of the total dataset). Higher is more accurate but slower.",
6
+ "val_data_root": "For 'manual' val_split_mode, the path to a folder containing validation items.",
7
+ "stabilize_training_loss": "If true, stabilize the train loss curves for `loss/epoch` and `loss/log step` by re-calculating training loss with a fixed random seed, and log the results as `loss/train-stabilized`. This more clearly shows the training progress, but it is not enough alone to tell you if you're overfitting.",
8
+ "stabilize_split_proportion": "For stabilize_training_loss, the proportion of the train dataset to overlap for stabilizing the train loss graph. Typical values are 0.15-0.2 (15-20% of the total dataset). Higher is more accurate but slower.",
9
+ "every_n_epochs": "How often to run validation (1=every epoch).",
10
+ "seed": "The seed to use when running validation and stabilization passes."
11
+ },
12
+ "validate_training": true,
13
+ "val_split_mode": "automatic",
14
+ "val_data_root": null,
15
+ "val_split_proportion": 0.15,
16
+ "stabilize_training_loss": true,
17
+ "stabilize_split_proportion": 0.15,
18
+ "every_n_epochs": 1,
19
+ "seed": 555
20
+ }