Guizmus committed
Commit 88520b9
1 Parent(s): b77a4f6
args.json ADDED
@@ -0,0 +1,80 @@
+ {
+   "disable_cudnn_benchmark": true,
+   "use_text_files_as_captions": false,
+   "stop_text_encoder_training": 999999999999999,
+   "use_bucketing": true,
+   "regenerate_latent_cache": false,
+   "save_latents_cache": false,
+   "sample_on_training_start": true,
+   "add_class_images_to_dataset": false,
+   "auto_balance_concept_datasets": false,
+   "sample_aspect_ratios": false,
+   "dataset_repeats": 40,
+   "save_every_n_epoch": 1,
+   "pretrained_model_name_or_path": "F:\\AI\\Software\\DreamboothSimpleUI TESTS\\data\\diffusers\\SD-2.1",
+   "pretrained_vae_name_or_path": "",
+   "tokenizer_name": null,
+   "instance_data_dir": null,
+   "class_data_dir": null,
+   "instance_prompt": null,
+   "class_prompt": null,
+   "save_sample_prompt": null,
+   "n_save_sample": 4,
+   "sample_height": 768,
+   "sample_width": 768,
+   "save_guidance_scale": 7.5,
+   "save_infer_steps": 30,
+   "with_prior_preservation": false,
+   "prior_loss_weight": 1.0,
+   "num_class_images": 1000,
+   "output_dir": "models/mosaic",
+   "seed": 3434554,
+   "resolution": 768,
+   "center_crop": false,
+   "train_text_encoder": true,
+   "train_batch_size": 4,
+   "sample_batch_size": 4,
+   "num_train_epochs": 6,
+   "max_train_steps": 1680,
+   "gradient_accumulation_steps": 1,
+   "gradient_checkpointing": true,
+   "learning_rate": 1e-06,
+   "scale_lr": false,
+   "lr_scheduler": "constant",
+   "lr_warmup_steps": 0,
+   "use_8bit_adam": false,
+   "adam_beta1": 0.9,
+   "adam_beta2": 0.999,
+   "adam_weight_decay": 0.01,
+   "adam_epsilon": 1e-08,
+   "max_grad_norm": 1.0,
+   "push_to_hub": false,
+   "hub_token": null,
+   "hub_model_id": null,
+   "logging_dir": "logs",
+   "log_interval": 10,
+   "sample_step_interval": 500,
+   "mixed_precision": "fp16",
+   "not_cache_latents": false,
+   "local_rank": -1,
+   "concepts_list": [
+     {
+       "instance_prompt": "Mosaic Art",
+       "class_prompt": "Artstyle",
+       "instance_data_dir": "F:/AI/Data/Datasets/Captionned/good caption/Mosaic",
+       "class_data_dir": "F:/AI/Data/Datasets/Regularisation/Artstyle",
+       "do_not_balance": 0
+     }
+   ],
+   "save_sample_controlled_seed": null,
+   "delete_checkpoints_when_full_drive": false,
+   "send_telegram_updates": false,
+   "telegram_chat_id": "0",
+   "telegram_token": "0",
+   "use_deepspeed_adam": false,
+   "append_sample_controlled_seed_action": null,
+   "add_sample_prompt": [
+     "Mosaic art"
+   ],
+   "use_image_names_as_captions": true
+ }
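
The added args.json records the full Dreambooth run: SD 2.1 as the base model, one "Mosaic Art" concept paired with an "Artstyle" regularisation set, 768x768 training, batch size 4 for 1680 steps at a constant learning rate of 1e-6, fp16 mixed precision, and text-encoder training enabled. A minimal sketch of reading the file back; the snippet is illustrative only and not part of this repository, and every key it touches comes from the JSON above:

# Illustrative only: inspect the training arguments stored in args.json.
import json

with open("args.json") as f:
    args = json.load(f)

# Core training geometry recorded for this run.
print(args["pretrained_model_name_or_path"])                                   # SD 2.1 base
print(args["resolution"], args["train_batch_size"], args["max_train_steps"])   # 768 4 1680

# Each concepts_list entry pairs an instance dataset with a class
# (regularisation) dataset; this run trains a single "Mosaic Art" concept.
for concept in args["concepts_list"]:
    print(concept["instance_prompt"], "<-", concept["instance_data_dir"])
    print(concept["class_prompt"], "<-", concept["class_data_dir"])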
feature_extractor/preprocessor_config.json DELETED
@@ -1,28 +0,0 @@
- {
-   "crop_size": {
-     "height": 224,
-     "width": 224
-   },
-   "do_center_crop": true,
-   "do_convert_rgb": true,
-   "do_normalize": true,
-   "do_rescale": true,
-   "do_resize": true,
-   "feature_extractor_type": "CLIPFeatureExtractor",
-   "image_mean": [
-     0.48145466,
-     0.4578275,
-     0.40821073
-   ],
-   "image_processor_type": "CLIPImageProcessor",
-   "image_std": [
-     0.26862954,
-     0.26130258,
-     0.27577711
-   ],
-   "resample": 3,
-   "rescale_factor": 0.00392156862745098,
-   "size": {
-     "shortest_edge": 224
-   }
- }
model_index.json CHANGED
@@ -1,18 +1,18 @@
  {
    "_class_name": "StableDiffusionPipeline",
-   "_diffusers_version": "0.10.0.dev0",
+   "_diffusers_version": "0.10.2",
    "feature_extractor": [
-     "transformers",
-     "CLIPImageProcessor"
+     null,
+     null
    ],
-   "requires_safety_checker": true,
+   "requires_safety_checker": false,
    "safety_checker": [
-     "stable_diffusion",
-     "StableDiffusionSafetyChecker"
+     null,
+     null
    ],
    "scheduler": [
      "diffusers",
-     "PNDMScheduler"
+     "DDIMScheduler"
    ],
    "text_encoder": [
      "transformers",
safety_checker/config.json DELETED
@@ -1,181 +0,0 @@
- {
-   "_commit_hash": "cb41f3a270d63d454d385fc2e4f571c487c253c5",
-   "_name_or_path": "CompVis/stable-diffusion-safety-checker",
-   "architectures": [
-     "StableDiffusionSafetyChecker"
-   ],
-   "initializer_factor": 1.0,
-   "logit_scale_init_value": 2.6592,
-   "model_type": "clip",
-   "projection_dim": 768,
-   "text_config": {
-     "_name_or_path": "",
-     "add_cross_attention": false,
-     "architectures": null,
-     "attention_dropout": 0.0,
-     "bad_words_ids": null,
-     "begin_suppress_tokens": null,
-     "bos_token_id": 0,
-     "chunk_size_feed_forward": 0,
-     "cross_attention_hidden_size": null,
-     "decoder_start_token_id": null,
-     "diversity_penalty": 0.0,
-     "do_sample": false,
-     "dropout": 0.0,
-     "early_stopping": false,
-     "encoder_no_repeat_ngram_size": 0,
-     "eos_token_id": 2,
-     "exponential_decay_length_penalty": null,
-     "finetuning_task": null,
-     "forced_bos_token_id": null,
-     "forced_eos_token_id": null,
-     "hidden_act": "quick_gelu",
-     "hidden_size": 768,
-     "id2label": {
-       "0": "LABEL_0",
-       "1": "LABEL_1"
-     },
-     "initializer_factor": 1.0,
-     "initializer_range": 0.02,
-     "intermediate_size": 3072,
-     "is_decoder": false,
-     "is_encoder_decoder": false,
-     "label2id": {
-       "LABEL_0": 0,
-       "LABEL_1": 1
-     },
-     "layer_norm_eps": 1e-05,
-     "length_penalty": 1.0,
-     "max_length": 20,
-     "max_position_embeddings": 77,
-     "min_length": 0,
-     "model_type": "clip_text_model",
-     "no_repeat_ngram_size": 0,
-     "num_attention_heads": 12,
-     "num_beam_groups": 1,
-     "num_beams": 1,
-     "num_hidden_layers": 12,
-     "num_return_sequences": 1,
-     "output_attentions": false,
-     "output_hidden_states": false,
-     "output_scores": false,
-     "pad_token_id": 1,
-     "prefix": null,
-     "problem_type": null,
-     "projection_dim": 512,
-     "pruned_heads": {},
-     "remove_invalid_values": false,
-     "repetition_penalty": 1.0,
-     "return_dict": true,
-     "return_dict_in_generate": false,
-     "sep_token_id": null,
-     "suppress_tokens": null,
-     "task_specific_params": null,
-     "temperature": 1.0,
-     "tf_legacy_loss": false,
-     "tie_encoder_decoder": false,
-     "tie_word_embeddings": true,
-     "tokenizer_class": null,
-     "top_k": 50,
-     "top_p": 1.0,
-     "torch_dtype": null,
-     "torchscript": false,
-     "transformers_version": "4.26.0.dev0",
-     "typical_p": 1.0,
-     "use_bfloat16": false,
-     "vocab_size": 49408
-   },
-   "text_config_dict": {
-     "hidden_size": 768,
-     "intermediate_size": 3072,
-     "num_attention_heads": 12,
-     "num_hidden_layers": 12
-   },
-   "torch_dtype": "float32",
-   "transformers_version": null,
-   "vision_config": {
-     "_name_or_path": "",
-     "add_cross_attention": false,
-     "architectures": null,
-     "attention_dropout": 0.0,
-     "bad_words_ids": null,
-     "begin_suppress_tokens": null,
-     "bos_token_id": null,
-     "chunk_size_feed_forward": 0,
-     "cross_attention_hidden_size": null,
-     "decoder_start_token_id": null,
-     "diversity_penalty": 0.0,
-     "do_sample": false,
-     "dropout": 0.0,
-     "early_stopping": false,
-     "encoder_no_repeat_ngram_size": 0,
-     "eos_token_id": null,
-     "exponential_decay_length_penalty": null,
-     "finetuning_task": null,
-     "forced_bos_token_id": null,
-     "forced_eos_token_id": null,
-     "hidden_act": "quick_gelu",
-     "hidden_size": 1024,
-     "id2label": {
-       "0": "LABEL_0",
-       "1": "LABEL_1"
-     },
-     "image_size": 224,
-     "initializer_factor": 1.0,
-     "initializer_range": 0.02,
-     "intermediate_size": 4096,
-     "is_decoder": false,
-     "is_encoder_decoder": false,
-     "label2id": {
-       "LABEL_0": 0,
-       "LABEL_1": 1
-     },
-     "layer_norm_eps": 1e-05,
-     "length_penalty": 1.0,
-     "max_length": 20,
-     "min_length": 0,
-     "model_type": "clip_vision_model",
-     "no_repeat_ngram_size": 0,
-     "num_attention_heads": 16,
-     "num_beam_groups": 1,
-     "num_beams": 1,
-     "num_channels": 3,
-     "num_hidden_layers": 24,
-     "num_return_sequences": 1,
-     "output_attentions": false,
-     "output_hidden_states": false,
-     "output_scores": false,
-     "pad_token_id": null,
-     "patch_size": 14,
-     "prefix": null,
-     "problem_type": null,
-     "projection_dim": 512,
-     "pruned_heads": {},
-     "remove_invalid_values": false,
-     "repetition_penalty": 1.0,
-     "return_dict": true,
-     "return_dict_in_generate": false,
-     "sep_token_id": null,
-     "suppress_tokens": null,
-     "task_specific_params": null,
-     "temperature": 1.0,
-     "tf_legacy_loss": false,
-     "tie_encoder_decoder": false,
-     "tie_word_embeddings": true,
-     "tokenizer_class": null,
-     "top_k": 50,
-     "top_p": 1.0,
-     "torch_dtype": null,
-     "torchscript": false,
-     "transformers_version": "4.26.0.dev0",
-     "typical_p": 1.0,
-     "use_bfloat16": false
-   },
-   "vision_config_dict": {
-     "hidden_size": 1024,
-     "intermediate_size": 4096,
-     "num_attention_heads": 16,
-     "num_hidden_layers": 24,
-     "patch_size": 14
-   }
- }
safety_checker/pytorch_model.bin DELETED
@@ -1,3 +0,0 @@
- version https://git-lfs.github.com/spec/v1
- oid sha256:16d28f2b37109f222cdc33620fdd262102ac32112be0352a7f77e9614b35a394
- size 1216064769
scheduler/scheduler_config.json CHANGED
@@ -1,14 +1,21 @@
  {
-   "_class_name": "PNDMScheduler",
-   "_diffusers_version": "0.10.0.dev0",
+   "_class_name": "DPMSolverMultistepScheduler",
+   "_diffusers_version": "0.10.2",
+   "algorithm_type": "dpmsolver++",
    "beta_end": 0.012,
    "beta_schedule": "scaled_linear",
    "beta_start": 0.00085,
    "clip_sample": false,
+   "dynamic_thresholding_ratio": 0.995,
+   "lower_order_final": true,
    "num_train_timesteps": 1000,
-   "prediction_type": "epsilon",
+   "prediction_type": "v_prediction",
+   "sample_max_value": 1.0,
    "set_alpha_to_one": false,
    "skip_prk_steps": true,
+   "solver_order": 2,
+   "solver_type": "midpoint",
    "steps_offset": 1,
+   "thresholding": false,
    "trained_betas": null
  }
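
The saved scheduler switches from a PNDM, epsilon-prediction setup to DPMSolverMultistepScheduler with the dpmsolver++ algorithm, a second-order midpoint solver, and v_prediction, which is what the SD 2.1 768 base expects. A hedged sketch of rebuilding that scheduler from this config with the diffusers scheduler API (placeholder path, not a path from this commit):

# Sketch: reconstruct the scheduler saved in scheduler/scheduler_config.json.
from diffusers import DPMSolverMultistepScheduler

scheduler = DPMSolverMultistepScheduler.from_pretrained(
    "path/to/this/checkpoint", subfolder="scheduler"  # placeholder path
)
print(scheduler.config.prediction_type)   # "v_prediction"
print(scheduler.config.algorithm_type)    # "dpmsolver++"
# To use it for inference, assign it to a loaded pipeline: pipe.scheduler = scheduler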
text_encoder/config.json CHANGED
@@ -1,5 +1,5 @@
  {
-   "_name_or_path": "openai/clip-vit-large-patch14",
+   "_name_or_path": "F:\\AI\\Software\\DreamboothSimpleUI TESTS\\data\\diffusers\\SD-2.1",
    "architectures": [
      "CLIPTextModel"
    ],
@@ -7,19 +7,19 @@
    "bos_token_id": 0,
    "dropout": 0.0,
    "eos_token_id": 2,
-   "hidden_act": "quick_gelu",
-   "hidden_size": 768,
+   "hidden_act": "gelu",
+   "hidden_size": 1024,
    "initializer_factor": 1.0,
    "initializer_range": 0.02,
-   "intermediate_size": 3072,
+   "intermediate_size": 4096,
    "layer_norm_eps": 1e-05,
    "max_position_embeddings": 77,
    "model_type": "clip_text_model",
-   "num_attention_heads": 12,
-   "num_hidden_layers": 12,
+   "num_attention_heads": 16,
+   "num_hidden_layers": 23,
    "pad_token_id": 1,
-   "projection_dim": 768,
+   "projection_dim": 512,
    "torch_dtype": "float32",
-   "transformers_version": "4.26.0.dev0",
+   "transformers_version": "4.25.1",
    "vocab_size": 49408
  }
text_encoder/pytorch_model.bin CHANGED
@@ -1,3 +1,3 @@
  version https://git-lfs.github.com/spec/v1
- oid sha256:2b1808fc6f65f5f6f9ea8412d110dc59f93cf6c6d5a41fc9aea987cfc41fafac
- size 492307041
+ oid sha256:aed483ba4fb3e2da2c29a257ab6b4c683ac0accd56d77ed5b1df816808aeb292
+ size 1361677143
tokenizer/special_tokens_map.json CHANGED
@@ -13,7 +13,7 @@
      "rstrip": false,
      "single_word": false
    },
-   "pad_token": "<|endoftext|>",
+   "pad_token": "!",
    "unk_token": {
      "content": "<|endoftext|>",
      "lstrip": false,
tokenizer/tokenizer_config.json CHANGED
@@ -19,7 +19,7 @@
    },
    "errors": "replace",
    "model_max_length": 77,
-   "name_or_path": "openai/clip-vit-large-patch14",
+   "name_or_path": "F:\\AI\\Software\\DreamboothSimpleUI TESTS\\data\\diffusers\\SD-2.1\\tokenizer",
    "pad_token": "<|endoftext|>",
    "special_tokens_map_file": "./special_tokens_map.json",
    "tokenizer_class": "CLIPTokenizer",
unet/config.json CHANGED
@@ -1,8 +1,14 @@
  {
    "_class_name": "UNet2DConditionModel",
-   "_diffusers_version": "0.10.0.dev0",
+   "_diffusers_version": "0.10.2",
+   "_name_or_path": "F:\\AI\\Software\\DreamboothSimpleUI TESTS\\data\\diffusers\\SD-2.1",
    "act_fn": "silu",
-   "attention_head_dim": 8,
+   "attention_head_dim": [
+     5,
+     10,
+     20,
+     20
+   ],
    "block_out_channels": [
      320,
      640,
@@ -10,7 +16,7 @@
      1280
    ],
    "center_input_sample": false,
-   "cross_attention_dim": 768,
+   "cross_attention_dim": 1024,
    "down_block_types": [
      "CrossAttnDownBlock2D",
      "CrossAttnDownBlock2D",
@@ -29,12 +35,13 @@
    "num_class_embeds": null,
    "only_cross_attention": false,
    "out_channels": 4,
-   "sample_size": 64,
+   "sample_size": 96,
    "up_block_types": [
      "UpBlock2D",
      "CrossAttnUpBlock2D",
      "CrossAttnUpBlock2D",
      "CrossAttnUpBlock2D"
    ],
-   "use_linear_projection": false
+   "upcast_attention": true,
+   "use_linear_projection": true
  }
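
These changes move the UNet from the SD 1.x layout to the SD 2.1 768 layout: cross_attention_dim 1024 to match the larger text encoder, per-block attention head dims, upcast attention with linear projections, and a latent sample_size of 96, i.e. 96 x 8 = 768 px once the VAE's 8x downsampling is applied (matching the vae config and the training resolution in args.json). A small, hedged sanity check with a placeholder path:

# Sketch: relate the UNet's latent sample_size to the pixel resolution above.
from diffusers import UNet2DConditionModel

unet = UNet2DConditionModel.from_pretrained(
    "path/to/this/checkpoint", subfolder="unet"  # placeholder path
)
vae_scale_factor = 8  # Stable Diffusion's AutoencoderKL downsamples by a factor of 8
print(unet.config.sample_size * vae_scale_factor)  # 768, the 768x768 training resolution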
unet/diffusion_pytorch_model.bin CHANGED
@@ -1,3 +1,3 @@
  version https://git-lfs.github.com/spec/v1
- oid sha256:13c8d29ed3cb16d19c81c953ad4c3906928c5e4837d69595cfa47b5d9dce2c22
- size 3438366373
+ oid sha256:01ebea392218fe6b1b70c9bb37c90ccce17d342e6bd745641d7ca650af1de7f6
+ size 3463923045
vae/config.json CHANGED
@@ -1,6 +1,7 @@
  {
    "_class_name": "AutoencoderKL",
-   "_diffusers_version": "0.10.0.dev0",
+   "_diffusers_version": "0.10.2",
+   "_name_or_path": "F:\\AI\\Software\\DreamboothSimpleUI TESTS\\data\\diffusers\\SD-2.1",
    "act_fn": "silu",
    "block_out_channels": [
      128,
@@ -19,7 +20,7 @@
    "layers_per_block": 2,
    "norm_num_groups": 32,
    "out_channels": 3,
-   "sample_size": 512,
+   "sample_size": 768,
    "up_block_types": [
      "UpDecoderBlock2D",
      "UpDecoderBlock2D",
vae/diffusion_pytorch_model.bin CHANGED
@@ -1,3 +1,3 @@
  version https://git-lfs.github.com/spec/v1
- oid sha256:dcf4507d99b88db73f3916e2a20169fe74ada6b5582e9af56cfa80f5f3141765
- size 334711857
+ oid sha256:1b4889b6b1d4ce7ae320a02dedaeff1780ad77d415ea0d744b476155c6377ddc
+ size 334707217