Girl with yellow and green clothes

#37
.gitattributes CHANGED
@@ -33,11 +33,3 @@ safety_checker/pytorch_model.bin filter=lfs diff=lfs merge=lfs -text
 text_encoder/pytorch_model.bin filter=lfs diff=lfs merge=lfs -text
 unet/diffusion_pytorch_model.bin filter=lfs diff=lfs merge=lfs -text
 vae/diffusion_pytorch_model.bin filter=lfs diff=lfs merge=lfs -text
-text_encoder/model.safetensors filter=lfs diff=lfs merge=lfs -text
-unet/diffusion_pytorch_model.safetensors filter=lfs diff=lfs merge=lfs -text
-vae/diffusion_pytorch_model.safetensors filter=lfs diff=lfs merge=lfs -text
-safety_checker/model.safetensors filter=lfs diff=lfs merge=lfs -text
-unet/diffusion_pytorch_model.fp16.safetensors filter=lfs diff=lfs merge=lfs -text
-text_encoder/model.fp16.safetensors filter=lfs diff=lfs merge=lfs -text
-vae/diffusion_pytorch_model.fp16.safetensors filter=lfs diff=lfs merge=lfs -text
-safety_checker/model.fp16.safetensors filter=lfs diff=lfs merge=lfs -text
README.md CHANGED
@@ -5,19 +5,17 @@ tags:
 - stable-diffusion
 - text-to-image
 license: creativeml-openrail-m
-inference: true
+inference: false
 
 ---
 
-# waifu-diffusion v1.4 - Diffusion for Weebs
+# waifu-diffusion v1.3 - Diffusion for Weebs
 
 waifu-diffusion is a latent text-to-image diffusion model that has been conditioned on high-quality anime images through fine-tuning.
 
-![image](https://user-images.githubusercontent.com/26317155/210155933-db3a5f1a-1ec3-4777-915c-6deff2841ce9.png)
+<img src=https://i.imgur.com/Y5Tmw1S.png width=75% height=75%>
 
-<sub>masterpiece, best quality, 1girl, green hair, sweater, looking at viewer, upper body, beanie, outdoors, watercolor, night, turtleneck</sub>
-
-[Original Weights](https://huggingface.co/hakurei/waifu-diffusion-v1-4)
+[Original Weights](https://huggingface.co/hakurei/waifu-diffusion-v1-3)
 
 # Gradio & Colab
 
@@ -64,9 +62,9 @@ image.save("test.png")
 
 ## Team Members and Acknowledgements
 
-This project would not have been possible without the incredible work by Stability AI and Novel AI.
+This project would not have been possible without the incredible work by the [CompVis Researchers](https://ommer-lab.com/).
 
-- [Haru](https://github.com/harubaru)
+- [Anthony Mercurio](https://github.com/harubaru)
 - [Salt](https://github.com/sALTaccount/)
 - [Sta @ Bit192](https://twitter.com/naclbbr)
 
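The second hunk's context line (`image.save("test.png")`) points at the README's usage snippet, which sits between the two hunks and is untouched by this diff. For reference it follows the standard diffusers pattern; a minimal sketch, with the repo id and prompt as assumptions rather than quotes from the README:

```python
import torch
from diffusers import StableDiffusionPipeline

# Hypothetical repo id and prompt -- not taken from this diff.
pipe = StableDiffusionPipeline.from_pretrained(
    "hakurei/waifu-diffusion",
    torch_dtype=torch.float16,
).to("cuda")

prompt = "1girl, yellow and green clothes"
image = pipe(prompt, guidance_scale=6).images[0]
image.save("test.png")
```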
 
feature_extractor/preprocessor_config.json CHANGED
@@ -1,12 +1,8 @@
 {
-  "crop_size": {
-    "height": 224,
-    "width": 224
-  },
+  "crop_size": 224,
   "do_center_crop": true,
   "do_convert_rgb": true,
   "do_normalize": true,
-  "do_rescale": true,
   "do_resize": true,
   "feature_extractor_type": "CLIPFeatureExtractor",
   "image_mean": [
@@ -14,15 +10,11 @@
     0.4578275,
     0.40821073
   ],
-  "image_processor_type": "CLIPImageProcessor",
   "image_std": [
     0.26862954,
     0.26130258,
     0.27577711
   ],
   "resample": 3,
-  "rescale_factor": 0.00392156862745098,
-  "size": {
-    "shortest_edge": 224
-  }
+  "size": 224
 }
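This hunk swaps the newer serialization of the CLIP preprocessor (dict-valued `crop_size`/`size` plus explicit `do_rescale`/`rescale_factor`, as written by recent transformers releases) for the older scalar form. Current transformers still accepts the scalar form and normalizes it internally; a minimal sketch, assuming a recent transformers install:

```python
from transformers import CLIPImageProcessor

# Values mirror the post-change config; image_mean's first entry (0.48145466)
# is the standard CLIP statistic from the unchanged context line.
fe = CLIPImageProcessor(
    do_center_crop=True,
    do_convert_rgb=True,
    do_normalize=True,
    do_resize=True,
    image_mean=[0.48145466, 0.4578275, 0.40821073],
    image_std=[0.26862954, 0.26130258, 0.27577711],
    resample=3,
    crop_size=224,  # scalar form, as in the post-change file
    size=224,
)
print(fe.crop_size)  # normalized to {'height': 224, 'width': 224} on newer versions
```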
model_index.json CHANGED
@@ -1,18 +1,17 @@
 {
   "_class_name": "StableDiffusionPipeline",
-  "_diffusers_version": "0.10.2",
+  "_diffusers_version": "0.4.1",
   "feature_extractor": [
     "transformers",
-    "CLIPImageProcessor"
+    "CLIPFeatureExtractor"
   ],
-  "requires_safety_checker": true,
   "safety_checker": [
     "stable_diffusion",
     "StableDiffusionSafetyChecker"
   ],
   "scheduler": [
     "diffusers",
-    "PNDMScheduler"
+    "LMSDiscreteScheduler"
   ],
   "text_encoder": [
     "transformers",
safety_checker/config.json CHANGED
@@ -1,5 +1,5 @@
 {
-  "_commit_hash": "cb41f3a270d63d454d385fc2e4f571c487c253c5",
+  "_commit_hash": null,
   "_name_or_path": "CompVis/stable-diffusion-safety-checker",
   "architectures": [
     "StableDiffusionSafetyChecker"
@@ -14,7 +14,6 @@
     "architectures": null,
     "attention_dropout": 0.0,
     "bad_words_ids": null,
-    "begin_suppress_tokens": null,
     "bos_token_id": 0,
     "chunk_size_feed_forward": 0,
     "cross_attention_hidden_size": null,
@@ -62,14 +61,12 @@
     "pad_token_id": 1,
     "prefix": null,
     "problem_type": null,
-    "projection_dim": 512,
     "pruned_heads": {},
     "remove_invalid_values": false,
     "repetition_penalty": 1.0,
     "return_dict": true,
     "return_dict_in_generate": false,
     "sep_token_id": null,
-    "suppress_tokens": null,
     "task_specific_params": null,
     "temperature": 1.0,
     "tf_legacy_loss": false,
@@ -80,7 +77,7 @@
     "top_p": 1.0,
     "torch_dtype": null,
     "torchscript": false,
-    "transformers_version": "4.25.1",
+    "transformers_version": "4.22.2",
     "typical_p": 1.0,
     "use_bfloat16": false,
     "vocab_size": 49408
@@ -99,7 +96,6 @@
     "architectures": null,
     "attention_dropout": 0.0,
     "bad_words_ids": null,
-    "begin_suppress_tokens": null,
     "bos_token_id": null,
     "chunk_size_feed_forward": 0,
     "cross_attention_hidden_size": null,
@@ -149,14 +145,12 @@
     "patch_size": 14,
     "prefix": null,
     "problem_type": null,
-    "projection_dim": 512,
     "pruned_heads": {},
     "remove_invalid_values": false,
     "repetition_penalty": 1.0,
     "return_dict": true,
     "return_dict_in_generate": false,
     "sep_token_id": null,
-    "suppress_tokens": null,
     "task_specific_params": null,
     "temperature": 1.0,
     "tf_legacy_loss": false,
@@ -167,7 +161,7 @@
     "top_p": 1.0,
     "torch_dtype": null,
     "torchscript": false,
-    "transformers_version": "4.25.1",
+    "transformers_version": "4.22.2",
     "typical_p": 1.0,
     "use_bfloat16": false
   },
safety_checker/model.fp16.safetensors DELETED
@@ -1,3 +0,0 @@
-version https://git-lfs.github.com/spec/v1
-oid sha256:08902f19b1cfebd7c989f152fc0507bef6898c706a91d666509383122324b511
-size 608018440
safety_checker/model.safetensors DELETED
@@ -1,3 +0,0 @@
-version https://git-lfs.github.com/spec/v1
-oid sha256:9d6a233ff6fd5ccb9f76fd99618d73369c52dd3d8222376384d0e601911089e8
-size 1215981830
safety_checker/pytorch_model.bin CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:16d28f2b37109f222cdc33620fdd262102ac32112be0352a7f77e9614b35a394
-size 1216064769
+oid sha256:193490b58ef62739077262e833bf091c66c29488058681ac25cf7df3d8190974
+size 1216061799
safety_checker/pytorch_model.fp16.bin DELETED
@@ -1,3 +0,0 @@
-version https://git-lfs.github.com/spec/v1
-oid sha256:22ba87205445ad5def13e54919b038dcfb7321ec1c3f4b12487d4fba6036125f
-size 608103564
scheduler/scheduler_config.json CHANGED
@@ -1,14 +1,9 @@
 {
-  "_class_name": "PNDMScheduler",
-  "_diffusers_version": "0.10.2",
+  "_class_name": "LMSDiscreteScheduler",
+  "_diffusers_version": "0.4.1",
   "beta_end": 0.012,
   "beta_schedule": "scaled_linear",
   "beta_start": 0.00085,
-  "clip_sample": false,
   "num_train_timesteps": 1000,
-  "prediction_type": "epsilon",
-  "set_alpha_to_one": false,
-  "skip_prk_steps": true,
-  "steps_offset": 1,
   "trained_betas": null
 }
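The kept keys are the constructor arguments the two schedulers share; the removed ones (`clip_sample`, `set_alpha_to_one`, `skip_prk_steps`, `steps_offset`, `prediction_type`) are PNDM-specific options and fields that newer diffusers releases serialize into scheduler configs. A minimal sketch of building the post-change scheduler directly from these values:

```python
from diffusers import LMSDiscreteScheduler

scheduler = LMSDiscreteScheduler(
    num_train_timesteps=1000,
    beta_start=0.00085,
    beta_end=0.012,
    beta_schedule="scaled_linear",
    trained_betas=None,
)
scheduler.set_timesteps(50)  # 50 inference steps is an arbitrary example
print(scheduler.timesteps[:5])
```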
text_encoder/config.json CHANGED
@@ -1,5 +1,5 @@
 {
-  "_name_or_path": "/mnt/sd-finetune-data/finetunes/step_57000",
+  "_name_or_path": "openai/clip-vit-large-patch14",
   "architectures": [
     "CLIPTextModel"
   ],
@@ -7,19 +7,19 @@
   "bos_token_id": 0,
   "dropout": 0.0,
   "eos_token_id": 2,
-  "hidden_act": "gelu",
-  "hidden_size": 1024,
+  "hidden_act": "quick_gelu",
+  "hidden_size": 768,
   "initializer_factor": 1.0,
   "initializer_range": 0.02,
-  "intermediate_size": 4096,
+  "intermediate_size": 3072,
   "layer_norm_eps": 1e-05,
   "max_position_embeddings": 77,
   "model_type": "clip_text_model",
-  "num_attention_heads": 16,
-  "num_hidden_layers": 23,
+  "num_attention_heads": 12,
+  "num_hidden_layers": 12,
   "pad_token_id": 1,
-  "projection_dim": 512,
+  "projection_dim": 768,
   "torch_dtype": "float32",
-  "transformers_version": "4.25.1",
+  "transformers_version": "4.22.2",
   "vocab_size": 49408
 }
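The before side (hidden_size 1024, 23 layers, "gelu") describes the OpenCLIP text encoder used by Stable Diffusion 2.x-based checkpoints, while the after side matches `openai/clip-vit-large-patch14`, the text encoder of 1.x-based models, consistent with this PR reverting the repo from v1.4-style to v1.3-style files. A minimal sketch that checks the post-change dimensions against the upstream checkpoint named in `_name_or_path`:

```python
from transformers import CLIPTextModel

text_encoder = CLIPTextModel.from_pretrained("openai/clip-vit-large-patch14")
cfg = text_encoder.config
print(cfg.hidden_size, cfg.num_hidden_layers, cfg.hidden_act)
# expected: 768 12 quick_gelu
```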
text_encoder/model.fp16.safetensors DELETED
@@ -1,3 +0,0 @@
-version https://git-lfs.github.com/spec/v1
-oid sha256:22bc8e104d064b678ef7d2d2b217d4a8c9bfb79fb35792417cdf228e70adc7fb
-size 680821096
text_encoder/model.safetensors DELETED
@@ -1,3 +0,0 @@
-version https://git-lfs.github.com/spec/v1
-oid sha256:661a5d7f8e19fce696aa9d932ab97b546b4d4a2a2d87238a17761bef2704269f
-size 1361597016
text_encoder/pytorch_model.bin CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:040fc6498aa3cdbb926dc2d01c3d6629521e5f085d901d5e8d8c2b0e0aa2b1ce
-size 1361679905
+oid sha256:770a47a9ffdcfda0b05506a7888ed714d06131d60267e6cf52765d61cf59fd67
+size 492305335
text_encoder/pytorch_model.fp16.bin DELETED
@@ -1,3 +0,0 @@
-version https://git-lfs.github.com/spec/v1
-oid sha256:ba47a6c751cce5b6c4d5c79c8cd63bab63bfce3539e7805a70b8dca9d1f2f151
-size 680900852
tokenizer/special_tokens_map.json CHANGED
@@ -13,7 +13,7 @@
     "rstrip": false,
     "single_word": false
   },
-  "pad_token": "!",
+  "pad_token": "<|endoftext|>",
   "unk_token": {
     "content": "<|endoftext|>",
     "lstrip": false,
tokenizer/tokenizer_config.json CHANGED
@@ -19,7 +19,7 @@
   },
   "errors": "replace",
   "model_max_length": 77,
-  "name_or_path": "/mnt/sd-finetune-data/finetunes/step_57000",
+  "name_or_path": "openai/clip-vit-large-patch14",
   "pad_token": "<|endoftext|>",
   "special_tokens_map_file": "./special_tokens_map.json",
   "tokenizer_class": "CLIPTokenizer",
unet/config.json CHANGED
@@ -1,14 +1,8 @@
 {
   "_class_name": "UNet2DConditionModel",
-  "_diffusers_version": "0.10.2",
-  "_name_or_path": "/mnt/sd-finetune-data/finetunes/step_57000",
+  "_diffusers_version": "0.4.1",
   "act_fn": "silu",
-  "attention_head_dim": [
-    5,
-    10,
-    20,
-    20
-  ],
+  "attention_head_dim": 8,
   "block_out_channels": [
     320,
     640,
@@ -16,7 +10,7 @@
     1280
   ],
   "center_input_sample": false,
-  "cross_attention_dim": 1024,
+  "cross_attention_dim": 768,
   "down_block_types": [
     "CrossAttnDownBlock2D",
     "CrossAttnDownBlock2D",
@@ -24,7 +18,6 @@
     "DownBlock2D"
   ],
   "downsample_padding": 1,
-  "dual_cross_attention": false,
   "flip_sin_to_cos": true,
   "freq_shift": 0,
   "in_channels": 4,
@@ -32,16 +25,12 @@
   "mid_block_scale_factor": 1,
   "norm_eps": 1e-05,
   "norm_num_groups": 32,
-  "num_class_embeds": null,
-  "only_cross_attention": false,
   "out_channels": 4,
-  "sample_size": 64,
+  "sample_size": 32,
   "up_block_types": [
     "UpBlock2D",
     "CrossAttnUpBlock2D",
     "CrossAttnUpBlock2D",
     "CrossAttnUpBlock2D"
-  ],
-  "upcast_attention": false,
-  "use_linear_projection": true
+  ]
 }
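The UNet's `cross_attention_dim` has to equal the text encoder's hidden size, so the 1024 to 768 change here pairs with the text-encoder hunk above; likewise `attention_head_dim` collapses from the per-block list of SD 2.x-style configs to the single value of 1.x-era configs. `sample_size` is measured in latent pixels (image pixels divided by 8). A minimal sketch, with the repo id as an assumption:

```python
from diffusers import UNet2DConditionModel

unet = UNet2DConditionModel.from_pretrained("hakurei/waifu-diffusion", subfolder="unet")
print(unet.config.cross_attention_dim)  # 768 after this change
print(unet.config.sample_size)          # latent-space size: multiply by 8 for pixels
```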
unet/diffusion_pytorch_model.bin CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:24d2d0a39a4cd06869c91173d507cb153f272a1a328514f70b7ce9b48cab7e2b
-size 3463934693
+oid sha256:f47e5665f0e85155a5f6f58683b04940c6b132023d584396226bf54419a78831
+size 3438354725
unet/diffusion_pytorch_model.fp16.bin DELETED
@@ -1,3 +0,0 @@
-version https://git-lfs.github.com/spec/v1
-oid sha256:96edda2701914e1e248197bb205305e6aa9cfc776c3372cff9eaf62d4706a3cf
-size 1732107093
unet/diffusion_pytorch_model.fp16.safetensors DELETED
@@ -1,3 +0,0 @@
-version https://git-lfs.github.com/spec/v1
-oid sha256:7b5cdd1c15f025166ded673f898e09e621c9ff828d6508a81e83378a6d0ba8dd
-size 1731904736
unet/diffusion_pytorch_model.safetensors DELETED
@@ -1,3 +0,0 @@
-version https://git-lfs.github.com/spec/v1
-oid sha256:dda5a15fe85e6ea7fe0e21d06264611246ab60bdbf7001daa1e48028a49cd2e3
-size 3463726500
vae/config.json CHANGED
@@ -1,7 +1,6 @@
 {
   "_class_name": "AutoencoderKL",
-  "_diffusers_version": "0.10.2",
-  "_name_or_path": "/mnt/sd-finetune-data/base/vae",
+  "_diffusers_version": "0.4.1",
   "act_fn": "silu",
   "block_out_channels": [
     128,
@@ -20,7 +19,7 @@
   "layers_per_block": 2,
   "norm_num_groups": 32,
   "out_channels": 3,
-  "sample_size": 512,
+  "sample_size": 256,
   "up_block_types": [
     "UpDecoderBlock2D",
     "UpDecoderBlock2D",
vae/diffusion_pytorch_model.bin CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:3e174991e5609bc5c2b3995e3f223fb2c5f0ae3be307fa9591b351d837a08770
-size 334711857
+oid sha256:1b134cded8eb78b184aefb8805b6b572f36fa77b255c483665dda931fa0130c5
+size 334707217
vae/diffusion_pytorch_model.fp16.bin DELETED
@@ -1,3 +0,0 @@
-version https://git-lfs.github.com/spec/v1
-oid sha256:d207e4928394a2002bcb7fff829e93bbd2a44bc323e597fdb690d3fc2d064de2
-size 167405651
vae/diffusion_pytorch_model.fp16.safetensors DELETED
@@ -1,3 +0,0 @@
-version https://git-lfs.github.com/spec/v1
-oid sha256:9d8fb415ab4f9782232e7bb82e618e2c0cef0be3593c77a35f5733d8fdd3530f
-size 167335342
vae/diffusion_pytorch_model.safetensors DELETED
@@ -1,3 +0,0 @@
-version https://git-lfs.github.com/spec/v1
-oid sha256:d55443a2d9d4d9decdbe669c51cc6d91eb6a2297477624e2e16a3054f30c2f5a
-size 334643276